/*
 * Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Network Block Device Client Side
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/queue.h"
#include "trace.h"
#include "nbd-internal.h"
#include "qemu/cutils.h"
/* Definitions for opaque data types */

static QTAILQ_HEAD(, NBDExport) exports =
    QTAILQ_HEAD_INITIALIZER(exports);

/* That's all folks */

/* Basic flow for negotiation */
/* Send an option request.
 *
 * The request is for option @opt, with @data containing @len bytes of
 * additional payload for the request (@len may be -1 to treat @data as
 * a C string; and @data may be NULL if @len is 0).
 * Return 0 if successful, -1 with errp set if it is impossible to
 * continue. */
static int nbd_send_option_request(QIOChannel *ioc, uint32_t opt,
                                   uint32_t len, const char *data,
                                   Error **errp)
{
    NBDOption req;
    QEMU_BUILD_BUG_ON(sizeof(req) != 16);

    if (len == -1) {
        req.length = len = strlen(data);
    }
    trace_nbd_send_option_request(opt, nbd_opt_lookup(opt), len);
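
    /* The option request header is a fixed 16 bytes on the wire: a 64-bit
     * magic, a 32-bit option number and a 32-bit payload length, all
     * stored big-endian. */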
    stq_be_p(&req.magic, NBD_OPTS_MAGIC);
    stl_be_p(&req.option, opt);
    stl_be_p(&req.length, len);
    if (nbd_write(ioc, &req, sizeof(req), errp) < 0) {
        error_prepend(errp, "Failed to send option request header: ");
        return -1;
    }

    if (len && nbd_write(ioc, (char *) data, len, errp) < 0) {
        error_prepend(errp, "Failed to send option request data: ");
        return -1;
    }

    return 0;
}
/* Send NBD_OPT_ABORT as a courtesy to let the server know that we are
 * not going to attempt further negotiation. */
static void nbd_send_opt_abort(QIOChannel *ioc)
{
    /* Technically, a compliant server is supposed to reply to us; but
     * older servers disconnected instead. At any rate, we're allowed
     * to disconnect without waiting for the server reply, so we don't
     * even care if the request makes it to the server, let alone
     * waiting around for whether the server replies. */
    nbd_send_option_request(ioc, NBD_OPT_ABORT, 0, NULL, NULL);
}
/* Receive the header of an option reply, which should match the given
 * opt. Read through the length field, but NOT the length bytes of
 * payload. Return 0 if successful, -1 with errp set if it is
 * impossible to continue. */
static int nbd_receive_option_reply(QIOChannel *ioc, uint32_t opt,
                                    NBDOptionReply *reply, Error **errp)
{
    QEMU_BUILD_BUG_ON(sizeof(*reply) != 20);
    if (nbd_read(ioc, reply, sizeof(*reply), "option reply", errp) < 0) {
        nbd_send_opt_abort(ioc);
        return -1;
    }
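
    /* All reply header fields arrive in network (big-endian) order;
     * convert them in place before use. */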
    reply->magic = be64_to_cpu(reply->magic);
    reply->option = be32_to_cpu(reply->option);
    reply->type = be32_to_cpu(reply->type);
    reply->length = be32_to_cpu(reply->length);
    trace_nbd_receive_option_reply(reply->option, nbd_opt_lookup(reply->option),
                                   reply->type, nbd_rep_lookup(reply->type),
                                   reply->length);

    if (reply->magic != NBD_REP_MAGIC) {
        error_setg(errp, "Unexpected option reply magic");
        nbd_send_opt_abort(ioc);
        return -1;
    }
    if (reply->option != opt) {
        error_setg(errp, "Unexpected option type %u (%s), expected %u (%s)",
                   reply->option, nbd_opt_lookup(reply->option),
                   opt, nbd_opt_lookup(opt));
        nbd_send_opt_abort(ioc);
        return -1;
    }
    return 0;
}
/*
 * If reply represents success, return 1 without further action. If
 * reply represents an error, consume the optional payload of the
 * packet on ioc. Then return 0 for unsupported (so the client can
 * fall back to other approaches), where @strict determines if only
 * ERR_UNSUP or all errors fit that category, or -1 with errp set for
 * other errors.
 */
static int nbd_handle_reply_err(QIOChannel *ioc, NBDOptionReply *reply,
                                bool strict, Error **errp)
{
    g_autofree char *msg = NULL;
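
    /* Per the NBD protocol, error replies are distinguished by having
     * the top bit of the reply type set. */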
    if (!(reply->type & (1 << 31))) {
        return 1;
    }

    if (reply->length > NBD_MAX_BUFFER_SIZE) {
        error_setg(errp, "server error %" PRIu32
                   " (%s) message is too long",
                   reply->type, nbd_rep_lookup(reply->type));
        goto err;
    }

    msg = g_malloc(reply->length + 1);
    if (nbd_read(ioc, msg, reply->length, NULL, errp) < 0) {
        error_prepend(errp, "Failed to read option error %" PRIu32
                      " (%s) message: ",
                      reply->type, nbd_rep_lookup(reply->type));
        goto err;
    }
    msg[reply->length] = '\0';
    trace_nbd_server_error_msg(reply->type,
                               nbd_reply_type_lookup(reply->type), msg);

    if (reply->type == NBD_REP_ERR_UNSUP || !strict) {
        trace_nbd_reply_err_ignored(reply->option,
                                    nbd_opt_lookup(reply->option),
                                    reply->type, nbd_rep_lookup(reply->type));
        return 0;
    }
    switch (reply->type) {
    case NBD_REP_ERR_POLICY:
        error_setg(errp, "Denied by server for option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_INVALID:
        error_setg(errp, "Invalid parameters for option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_PLATFORM:
        error_setg(errp, "Server lacks support for option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_TLS_REQD:
        error_setg(errp, "TLS negotiation required before option %" PRIu32
                   " (%s)", reply->option, nbd_opt_lookup(reply->option));
        error_append_hint(errp, "Did you forget a valid tls-creds?\n");
        break;

    case NBD_REP_ERR_UNKNOWN:
        error_setg(errp, "Requested export not available");
        break;

    case NBD_REP_ERR_SHUTDOWN:
        error_setg(errp, "Server shutting down before option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_BLOCK_SIZE_REQD:
        error_setg(errp, "Server requires INFO_BLOCK_SIZE for option %" PRIu32
                   " (%s)", reply->option, nbd_opt_lookup(reply->option));
        break;

    default:
        error_setg(errp, "Unknown error code when asking for option %" PRIu32
                   " (%s)", reply->option, nbd_opt_lookup(reply->option));
        break;
    }

    if (msg) {
        error_append_hint(errp, "server reported: %s\n", msg);
    }

 err:
    nbd_send_opt_abort(ioc);
    return -1;
}
/*
 * Process another portion of the NBD_OPT_LIST reply, populating any
 * name received into *@name. If @description is non-NULL, and the
 * server provided a description, that is also populated. The caller
 * must eventually call g_free() on success.
 * Returns 1 if name and description were set and iteration must continue,
 *         0 if iteration is complete (including if OPT_LIST is unsupported),
 *         -1 with @errp set if an unrecoverable error occurred.
 */
static int nbd_receive_list(QIOChannel *ioc, char **name, char **description,
                            Error **errp)
{
    NBDOptionReply reply;
    uint32_t len;
    uint32_t namelen;
    int error;
    g_autofree char *local_name = NULL;
    g_autofree char *local_desc = NULL;
    if (nbd_receive_option_reply(ioc, NBD_OPT_LIST, &reply, errp) < 0) {
        return -1;
    }
    error = nbd_handle_reply_err(ioc, &reply, true, errp);
    if (error <= 0) {
        return error;
    }
    len = reply.length;

    if (reply.type == NBD_REP_ACK) {
        if (len != 0) {
            error_setg(errp, "length too long for option end");
            nbd_send_opt_abort(ioc);
            return -1;
        }
        return 0;
    } else if (reply.type != NBD_REP_SERVER) {
        error_setg(errp, "Unexpected reply type %u (%s), expected %u (%s)",
                   reply.type, nbd_rep_lookup(reply.type),
                   NBD_REP_SERVER, nbd_rep_lookup(NBD_REP_SERVER));
        nbd_send_opt_abort(ioc);
        return -1;
    }
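
    /* An NBD_REP_SERVER payload holds a 32-bit name length, the export
     * name itself, and an optional free-form description in whatever
     * bytes remain. */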
    if (len < sizeof(namelen) || len > NBD_MAX_BUFFER_SIZE) {
        error_setg(errp, "incorrect option length %" PRIu32, len);
        nbd_send_opt_abort(ioc);
        return -1;
    }
    if (nbd_read32(ioc, &namelen, "option name length", errp) < 0) {
        nbd_send_opt_abort(ioc);
        return -1;
    }
    len -= sizeof(namelen);
    if (len < namelen || namelen > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "incorrect name length in server's list response");
        nbd_send_opt_abort(ioc);
        return -1;
    }

    local_name = g_malloc(namelen + 1);
    if (nbd_read(ioc, local_name, namelen, "export name", errp) < 0) {
        nbd_send_opt_abort(ioc);
        return -1;
    }
    local_name[namelen] = '\0';
    len -= namelen;
    if (len > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "incorrect description length in server's "
                   "list response");
        nbd_send_opt_abort(ioc);
        return -1;
    }
    local_desc = g_malloc(len + 1);
    if (nbd_read(ioc, local_desc, len, "export description", errp) < 0) {
        nbd_send_opt_abort(ioc);
        return -1;
    }
    local_desc[len] = '\0';

    trace_nbd_receive_list(local_name, local_desc ?: "");
    *name = g_steal_pointer(&local_name);
    if (description) {
        *description = g_steal_pointer(&local_desc);
    }
    return 1;
}
/*
 * nbd_opt_info_or_go:
 * Send option for NBD_OPT_INFO or NBD_OPT_GO and parse the reply.
 * Returns -1 if the option proves the export @info->name cannot be
 * used, 0 if the option is unsupported (fall back to NBD_OPT_LIST and
 * NBD_OPT_EXPORT_NAME in that case), and > 0 if the export is good to
 * go (with the rest of @info populated).
 */
static int nbd_opt_info_or_go(QIOChannel *ioc, uint32_t opt,
                              NBDExportInfo *info, Error **errp)
{
    NBDOptionReply reply;
    uint32_t len = strlen(info->name);
    uint16_t type;
    g_autofree char *buf = NULL;
    int error;

    /* The protocol requires that the server send NBD_INFO_EXPORT with
     * a non-zero flags (at least NBD_FLAG_HAS_FLAGS must be set); so
     * flags still 0 is a witness of a broken server. */
    info->flags = 0;
    assert(opt == NBD_OPT_GO || opt == NBD_OPT_INFO);
    trace_nbd_opt_info_go_start(nbd_opt_lookup(opt), info->name);
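
    /* Request payload layout: 32-bit export name length, the export name,
     * a 16-bit count of info requests, then one 16-bit NBD_INFO_BLOCK_SIZE
     * request when the caller asked for sizing information. */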
    buf = g_malloc(4 + len + 2 + 2 * info->request_sizes + 1);
    stl_be_p(buf, len);
    memcpy(buf + 4, info->name, len);
    /* At most one request, everything else up to server */
    stw_be_p(buf + 4 + len, info->request_sizes);
    if (info->request_sizes) {
        stw_be_p(buf + 4 + len + 2, NBD_INFO_BLOCK_SIZE);
    }
    error = nbd_send_option_request(ioc, opt,
                                    4 + len + 2 + 2 * info->request_sizes,
                                    buf, errp);
    if (error < 0) {
        return error;
    }
    while (1) {
        if (nbd_receive_option_reply(ioc, opt, &reply, errp) < 0) {
            return -1;
        }
        error = nbd_handle_reply_err(ioc, &reply, true, errp);
        if (error <= 0) {
            return error;
        }
        len = reply.length;

        if (reply.type == NBD_REP_ACK) {
            /*
             * Server is done sending info, and moved into transmission
             * phase for NBD_OPT_GO, but make sure it sent flags.
             */
            if (len) {
                error_setg(errp, "server sent invalid NBD_REP_ACK");
                return -1;
            }
            if (!info->flags) {
                error_setg(errp, "broken server omitted NBD_INFO_EXPORT");
                return -1;
            }
            trace_nbd_opt_info_go_success(nbd_opt_lookup(opt));
            return 1;
        }
        if (reply.type != NBD_REP_INFO) {
            error_setg(errp, "unexpected reply type %u (%s), expected %u (%s)",
                       reply.type, nbd_rep_lookup(reply.type),
                       NBD_REP_INFO, nbd_rep_lookup(NBD_REP_INFO));
            nbd_send_opt_abort(ioc);
            return -1;
        }
        if (len < sizeof(type)) {
            error_setg(errp, "NBD_REP_INFO length %" PRIu32 " is too short",
                       len);
            nbd_send_opt_abort(ioc);
            return -1;
        }
        if (nbd_read16(ioc, &type, "info type", errp) < 0) {
            nbd_send_opt_abort(ioc);
            return -1;
        }
        len -= sizeof(type);
        switch (type) {
        case NBD_INFO_EXPORT:
            if (len != sizeof(info->size) + sizeof(info->flags)) {
                error_setg(errp, "remaining export info len %" PRIu32
                           " is unexpected size", len);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read64(ioc, &info->size, "info size", errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read16(ioc, &info->flags, "info flags", errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (info->min_block &&
                !QEMU_IS_ALIGNED(info->size, info->min_block)) {
                error_setg(errp, "export size %" PRIu64 " is not multiple of "
                           "minimum block size %" PRIu32, info->size,
                           info->min_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            trace_nbd_receive_negotiate_size_flags(info->size, info->flags);
            break;
        case NBD_INFO_BLOCK_SIZE:
            if (len != sizeof(info->min_block) * 3) {
                error_setg(errp, "remaining export info len %" PRIu32
                           " is unexpected size", len);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read32(ioc, &info->min_block, "info minimum block size",
                           errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (!is_power_of_2(info->min_block)) {
                error_setg(errp, "server minimum block size %" PRIu32
                           " is not a power of two", info->min_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read32(ioc, &info->opt_block, "info preferred block size",
                           errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (!is_power_of_2(info->opt_block) ||
                info->opt_block < info->min_block) {
                error_setg(errp, "server preferred block size %" PRIu32
                           " is not valid", info->opt_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read32(ioc, &info->max_block, "info maximum block size",
                           errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (info->max_block < info->min_block) {
                error_setg(errp, "server maximum block size %" PRIu32
                           " is not valid", info->max_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            trace_nbd_opt_info_block_size(info->min_block, info->opt_block,
                                          info->max_block);
            break;
        default:
            /*
             * Not worth the bother to check if NBD_INFO_NAME or
             * NBD_INFO_DESCRIPTION exceed NBD_MAX_STRING_SIZE.
             */
            trace_nbd_opt_info_unknown(type, nbd_info_lookup(type));
            if (nbd_drop(ioc, len, errp) < 0) {
                error_prepend(errp, "Failed to read info payload: ");
                nbd_send_opt_abort(ioc);
                return -1;
            }
            break;
        }
    }
}
/* Return -1 on failure, 0 if wantname is an available export. */
static int nbd_receive_query_exports(QIOChannel *ioc,
                                     const char *wantname,
                                     Error **errp)
{
    bool list_empty = true;
    bool found_export = false;

    trace_nbd_receive_query_exports_start(wantname);
    if (nbd_send_option_request(ioc, NBD_OPT_LIST, 0, NULL, errp) < 0) {
        return -1;
    }

    while (1) {
        char *name;
        int ret = nbd_receive_list(ioc, &name, NULL, errp);

        if (ret < 0) {
            /* Server gave unexpected reply */
            return -1;
        } else if (ret == 0) {
            /* Done iterating. */
            if (list_empty) {
                /*
                 * We don't have enough context to tell a server that
                 * sent an empty list apart from a server that does
                 * not support the list command; but as this function
                 * is just used to trigger a nicer error message
                 * before trying NBD_OPT_EXPORT_NAME, assume the
                 * export is available.
                 */
                return 0;
            } else if (!found_export) {
                error_setg(errp, "No export with name '%s' available",
                           wantname);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            trace_nbd_receive_query_exports_success(wantname);
            return 0;
        }
        list_empty = false;
        if (!strcmp(name, wantname)) {
            found_export = true;
        }
        g_free(name);
    }
}
/*
 * nbd_request_simple_option: Send an option request, and parse the reply.
 * @strict controls whether ERR_UNSUP or all errors produce 0 status.
 * return 1 for successful negotiation,
 *        0 if operation is unsupported,
 *        -1 with errp set for any other error
 */
static int nbd_request_simple_option(QIOChannel *ioc, int opt, bool strict,
                                     Error **errp)
{
    NBDOptionReply reply;
    int error;

    if (nbd_send_option_request(ioc, opt, 0, NULL, errp) < 0) {
        return -1;
    }
    if (nbd_receive_option_reply(ioc, opt, &reply, errp) < 0) {
        return -1;
    }
    error = nbd_handle_reply_err(ioc, &reply, strict, errp);
    if (error <= 0) {
        return error;
    }

    if (reply.type != NBD_REP_ACK) {
        error_setg(errp, "Server answered option %d (%s) with unexpected "
                   "reply %" PRIu32 " (%s)", opt, nbd_opt_lookup(opt),
                   reply.type, nbd_rep_lookup(reply.type));
        nbd_send_opt_abort(ioc);
        return -1;
    }

    if (reply.length != 0) {
        error_setg(errp, "Option %d ('%s') response length is %" PRIu32
                   " (it should be zero)", opt, nbd_opt_lookup(opt),
                   reply.length);
        nbd_send_opt_abort(ioc);
        return -1;
    }

    return 1;
}
/* Callback to learn when QIO TLS upgrade is complete */
struct NBDTLSClientHandshakeData {
    bool complete;
    Error *error;
    GMainLoop *loop;
};

static void nbd_client_tls_handshake(QIOTask *task, void *opaque)
{
    struct NBDTLSClientHandshakeData *data = opaque;

    qio_task_propagate_error(task, &data->error);
    data->complete = true;
    if (data->loop) {
        g_main_loop_quit(data->loop);
    }
}
static QIOChannel *nbd_receive_starttls(QIOChannel *ioc,
                                        QCryptoTLSCreds *tlscreds,
                                        const char *hostname, Error **errp)
{
    int ret;
    QIOChannelTLS *tioc;
    struct NBDTLSClientHandshakeData data = { 0 };

    ret = nbd_request_simple_option(ioc, NBD_OPT_STARTTLS, true, errp);
    if (ret <= 0) {
        if (ret == 0) {
            error_setg(errp, "Server doesn't support STARTTLS option");
            nbd_send_opt_abort(ioc);
        }
        return NULL;
    }

    trace_nbd_receive_starttls_new_client();
    tioc = qio_channel_tls_new_client(ioc, tlscreds, hostname, errp);
    if (!tioc) {
        return NULL;
    }
    qio_channel_set_name(QIO_CHANNEL(tioc), "nbd-client-tls");
    trace_nbd_receive_starttls_tls_handshake();
    qio_channel_tls_handshake(tioc,
                              nbd_client_tls_handshake,
                              &data,
                              NULL,
                              NULL);
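
    /* If the handshake has not finished yet, spin a temporary main loop;
     * nbd_client_tls_handshake() quits it once the handshake completes. */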
    if (!data.complete) {
        data.loop = g_main_loop_new(g_main_context_default(), FALSE);
        g_main_loop_run(data.loop);
        assert(data.complete);
        g_main_loop_unref(data.loop);
    }

    if (data.error) {
        error_propagate(errp, data.error);
        object_unref(OBJECT(tioc));
        return NULL;
    }

    return QIO_CHANNEL(tioc);
}
/*
 * nbd_send_meta_query:
 * Send 0 or 1 set/list meta context queries.
 * Return 0 on success, -1 with errp set for any error
 */
static int nbd_send_meta_query(QIOChannel *ioc, uint32_t opt,
                               const char *export, const char *query,
                               Error **errp)
{
    int ret;
    uint32_t export_len;
    uint32_t data_len;
    uint32_t queries = !!query;
    uint32_t query_len = 0;
    g_autofree char *data = NULL;
    char *p;
    assert(strnlen(export, NBD_MAX_STRING_SIZE + 1) <= NBD_MAX_STRING_SIZE);
    export_len = strlen(export);
    data_len = sizeof(export_len) + export_len + sizeof(queries);
    if (query) {
        assert(strnlen(query, NBD_MAX_STRING_SIZE + 1) <= NBD_MAX_STRING_SIZE);
        query_len = strlen(query);
        data_len += sizeof(query_len) + query_len;
    } else {
        assert(opt == NBD_OPT_LIST_META_CONTEXT);
    }
    p = data = g_malloc(data_len);

    trace_nbd_opt_meta_request(nbd_opt_lookup(opt), query ?: "(all)", export);
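
    /* Payload layout: 32-bit export name length, the export name, a 32-bit
     * query count, then (for a single query) a 32-bit query length followed
     * by the query string; all integers are big-endian. */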
    stl_be_p(p, export_len);
    memcpy(p += sizeof(export_len), export, export_len);
    stl_be_p(p += export_len, queries);
    if (query) {
        stl_be_p(p += sizeof(queries), query_len);
        memcpy(p += sizeof(query_len), query, query_len);
    }

    ret = nbd_send_option_request(ioc, opt, data_len, data, errp);
    return ret;
}
/*
 * nbd_receive_one_meta_context:
 * Called in a loop to receive and trace one set/list meta context reply.
 * Pass non-NULL @name or @id to collect results back to the caller, which
 * must eventually call g_free().
 * return 1 if name is set and iteration must continue,
 *        0 if iteration is complete (including if option is unsupported),
 *        -1 with errp set for any error
 */
static int nbd_receive_one_meta_context(QIOChannel *ioc,
                                        uint32_t opt,
                                        char **name,
                                        uint32_t *id,
                                        Error **errp)
{
    int ret;
    NBDOptionReply reply;
    char *local_name = NULL;
    uint32_t local_id;

    if (nbd_receive_option_reply(ioc, opt, &reply, errp) < 0) {
        return -1;
    }

    ret = nbd_handle_reply_err(ioc, &reply, false, errp);
    if (ret <= 0) {
        return ret;
    }

    if (reply.type == NBD_REP_ACK) {
        if (reply.length != 0) {
            error_setg(errp, "Unexpected length to ACK response");
            nbd_send_opt_abort(ioc);
            return -1;
        }
        return 0;
    } else if (reply.type != NBD_REP_META_CONTEXT) {
        error_setg(errp, "Unexpected reply type %u (%s), expected %u (%s)",
                   reply.type, nbd_rep_lookup(reply.type),
                   NBD_REP_META_CONTEXT, nbd_rep_lookup(NBD_REP_META_CONTEXT));
        nbd_send_opt_abort(ioc);
        return -1;
    }
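
    /* An NBD_REP_META_CONTEXT payload is a 32-bit context id followed by
     * the context name. */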
    if (reply.length <= sizeof(local_id) ||
        reply.length > NBD_MAX_BUFFER_SIZE) {
        error_setg(errp, "Failed to negotiate meta context, server "
                   "answered with unexpected length %" PRIu32,
                   reply.length);
        nbd_send_opt_abort(ioc);
        return -1;
    }
    if (nbd_read32(ioc, &local_id, "context id", errp) < 0) {
        return -1;
    }
    reply.length -= sizeof(local_id);
    local_name = g_malloc(reply.length + 1);
    if (nbd_read(ioc, local_name, reply.length, "context name", errp) < 0) {
        g_free(local_name);
        return -1;
    }
    local_name[reply.length] = '\0';
    trace_nbd_opt_meta_reply(nbd_opt_lookup(opt), local_name, local_id);

    if (name) {
        *name = local_name;
    } else {
        g_free(local_name);
    }
    if (id) {
        *id = local_id;
    }
    return 1;
}
/*
 * nbd_negotiate_simple_meta_context:
 * Request the server to set the meta context for export @info->name
 * using @info->x_dirty_bitmap with a fallback to "base:allocation",
 * setting @info->context_id to the resulting id. Fail if the server
 * responds with more than one context or with a context different
 * than the query.
 * return 1 for successful negotiation,
 *        0 if operation is unsupported,
 *        -1 with errp set for any other error
 */
static int nbd_negotiate_simple_meta_context(QIOChannel *ioc,
                                             NBDExportInfo *info,
                                             Error **errp)
{
    /*
     * TODO: Removing the x_dirty_bitmap hack will mean refactoring
     * this function to request and store ids for multiple contexts
     * (both base:allocation and a dirty bitmap), at which point this
     * function should lose the term _simple.
     */
    int ret;
    char *name;
    const char *context = info->x_dirty_bitmap ?: "base:allocation";
    bool received = false;

    if (nbd_send_meta_query(ioc, NBD_OPT_SET_META_CONTEXT,
                            info->name, context, errp) < 0) {
        return -1;
    }

    ret = nbd_receive_one_meta_context(ioc, NBD_OPT_SET_META_CONTEXT,
                                       &name, &info->context_id, errp);
    if (ret <= 0) {
        return ret;
    }
    received = true;

    if (strcmp(context, name)) {
        error_setg(errp, "Failed to negotiate meta context '%s', server "
                   "answered with different context '%s'", context,
                   name);
        g_free(name);
        nbd_send_opt_abort(ioc);
        return -1;
    }
    g_free(name);
    ret = nbd_receive_one_meta_context(ioc, NBD_OPT_SET_META_CONTEXT,
                                       NULL, NULL, errp);
    if (ret < 0) {
        return ret;
    }
    if (ret == 1) {
        error_setg(errp, "Server answered with more than one context");
        nbd_send_opt_abort(ioc);
        return -1;
    }
    return received;
}
/*
 * nbd_list_meta_contexts:
 * Request the server to list all meta contexts for export @info->name.
 * return 0 if list is complete (even if empty),
 *        -1 with errp set for any error
 */
static int nbd_list_meta_contexts(QIOChannel *ioc,
                                  NBDExportInfo *info,
                                  Error **errp)
{
    int ret;
    int seen_any = false;
    int seen_qemu = false;

    if (nbd_send_meta_query(ioc, NBD_OPT_LIST_META_CONTEXT,
                            info->name, NULL, errp) < 0) {
        return -1;
    }
    while (1) {
        char *context;

        ret = nbd_receive_one_meta_context(ioc, NBD_OPT_LIST_META_CONTEXT,
                                           &context, NULL, errp);
        if (ret == 0 && seen_any && !seen_qemu) {
            /*
             * Work around qemu 3.0 bug: the server forgot to send
             * "qemu:" replies to 0 queries. If we saw at least one
             * reply (probably base:allocation), but none of them were
             * qemu:, then run a more specific query to make sure.
             */
            seen_qemu = true;
            if (nbd_send_meta_query(ioc, NBD_OPT_LIST_META_CONTEXT,
                                    info->name, "qemu:", errp) < 0) {
                return -1;
            }
            continue;
        }
        if (ret <= 0) {
            return ret;
        }
        seen_any = true;
        seen_qemu |= strstart(context, "qemu:", NULL);
        info->contexts = g_renew(char *, info->contexts, ++info->n_contexts);
        info->contexts[info->n_contexts - 1] = context;
    }
}
/*
 * nbd_start_negotiate:
 * Start the handshake to the server. After a positive return, the server
 * is ready to accept additional NBD_OPT requests.
 * Returns: negative errno: failure talking to server
 *          non-negative: enum NBDMode describing server abilities
 */
static int nbd_start_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                               const char *hostname, QIOChannel **outioc,
                               NBDMode max_mode, bool *zeroes,
                               Error **errp)
{
    uint64_t magic;

    trace_nbd_start_negotiate(tlscreds, hostname ? hostname : "<null>");

    if (zeroes) {
        *zeroes = true;
    }
    if (outioc) {
        *outioc = NULL;
    }
    if (tlscreds && !outioc) {
        error_setg(errp, "Output I/O channel required for TLS");
        return -EINVAL;
    }

    if (nbd_read64(ioc, &magic, "initial magic", errp) < 0) {
        return -EINVAL;
    }
    trace_nbd_receive_negotiate_magic(magic);

    if (magic != NBD_INIT_MAGIC) {
        error_setg(errp, "Bad initial magic received: 0x%" PRIx64, magic);
        return -EINVAL;
    }

    if (nbd_read64(ioc, &magic, "server magic", errp) < 0) {
        return -EINVAL;
    }
    trace_nbd_receive_negotiate_magic(magic);

    if (magic == NBD_OPTS_MAGIC) {
        uint32_t clientflags = 0;
        uint16_t globalflags;
        bool fixedNewStyle = false;

        if (nbd_read16(ioc, &globalflags, "server flags", errp) < 0) {
            return -EINVAL;
        }
        trace_nbd_receive_negotiate_server_flags(globalflags);
        if (globalflags & NBD_FLAG_FIXED_NEWSTYLE) {
            fixedNewStyle = true;
            clientflags |= NBD_FLAG_C_FIXED_NEWSTYLE;
        }
        if (globalflags & NBD_FLAG_NO_ZEROES) {
            if (zeroes) {
                *zeroes = false;
            }
            clientflags |= NBD_FLAG_C_NO_ZEROES;
        }
        /* client requested flags */
        clientflags = cpu_to_be32(clientflags);
        if (nbd_write(ioc, &clientflags, sizeof(clientflags), errp) < 0) {
            error_prepend(errp, "Failed to send clientflags field: ");
            return -EINVAL;
        }
        if (tlscreds) {
            if (fixedNewStyle) {
                *outioc = nbd_receive_starttls(ioc, tlscreds, hostname, errp);
                if (!*outioc) {
                    return -EINVAL;
                }
                ioc = *outioc;
            } else {
                error_setg(errp, "Server does not support STARTTLS");
                return -EINVAL;
            }
        }
        if (fixedNewStyle) {
            int result;

            if (max_mode >= NBD_MODE_EXTENDED) {
                result = nbd_request_simple_option(ioc,
                                                   NBD_OPT_EXTENDED_HEADERS,
                                                   false, errp);
                if (result) {
                    return result < 0 ? -EINVAL : NBD_MODE_EXTENDED;
                }
            }
            if (max_mode >= NBD_MODE_STRUCTURED) {
                result = nbd_request_simple_option(ioc,
                                                   NBD_OPT_STRUCTURED_REPLY,
                                                   false, errp);
                if (result) {
                    return result < 0 ? -EINVAL : NBD_MODE_STRUCTURED;
                }
            }
            return NBD_MODE_SIMPLE;
        } else {
            return NBD_MODE_EXPORT_NAME;
        }
    } else if (magic == NBD_CLIENT_MAGIC) {
        if (tlscreds) {
            error_setg(errp, "Server does not support STARTTLS");
            return -EINVAL;
        }
        return NBD_MODE_OLDSTYLE;
    } else {
        error_setg(errp, "Bad server magic received: 0x%" PRIx64, magic);
        return -EINVAL;
    }
}
/*
 * nbd_negotiate_finish_oldstyle:
 * Populate @info with the size and export flags from an oldstyle server,
 * but do not consume the 124 bytes of reserved zero padding.
 * Returns 0 on success, -1 with @errp set on failure
 */
static int nbd_negotiate_finish_oldstyle(QIOChannel *ioc, NBDExportInfo *info,
                                         Error **errp)
{
    uint32_t oldflags;

    if (nbd_read64(ioc, &info->size, "export length", errp) < 0) {
        return -1;
    }

    if (nbd_read32(ioc, &oldflags, "export flags", errp) < 0) {
        return -1;
    }
    if (oldflags & ~0xffff) {
        error_setg(errp, "Unexpected export flags 0x%" PRIx32, oldflags);
        return -1;
    }
    info->flags = oldflags;
    return 0;
}
/*
 * nbd_receive_negotiate:
 * Connect to server, complete negotiation, and move into transmission phase.
 * Returns: negative errno: failure talking to server
 *          0: server is connected
 */
int nbd_receive_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                          const char *hostname, QIOChannel **outioc,
                          NBDExportInfo *info, Error **errp)
{
    int result;
    bool zeroes = true;
    bool base_allocation = info->base_allocation;

    assert(info->name && strlen(info->name) <= NBD_MAX_STRING_SIZE);
    trace_nbd_receive_negotiate_name(info->name);

    result = nbd_start_negotiate(ioc, tlscreds, hostname, outioc,
                                 info->mode, &zeroes, errp);
    if (result < 0) {
        return result;
    }

    info->mode = result;
    info->base_allocation = false;
    if (tlscreds && *outioc) {
        ioc = *outioc;
    }
    switch (info->mode) {
    case NBD_MODE_EXTENDED:
    case NBD_MODE_STRUCTURED:
        if (base_allocation) {
            result = nbd_negotiate_simple_meta_context(ioc, info, errp);
            if (result < 0) {
                return -EINVAL;
            }
            info->base_allocation = result == 1;
        }
        /* fall through */
    case NBD_MODE_SIMPLE:
        /* Try NBD_OPT_GO first - if it works, we are done (it
         * also gives us a good message if the server requires
         * TLS). If it is not available, fall back to
         * NBD_OPT_LIST for nicer error messages about a missing
         * export, then use NBD_OPT_EXPORT_NAME. */
        result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp);
        if (result < 0) {
            return -EINVAL;
        }
        if (result > 0) {
            return 0;
        }
        /* Check our desired export is present in the
         * server export list. Since NBD_OPT_EXPORT_NAME
         * cannot return an error message, running this
         * query gives us better error reporting if the
         * export name is not available.
         */
        if (nbd_receive_query_exports(ioc, info->name, errp) < 0) {
            return -EINVAL;
        }
        /* fall through */
    case NBD_MODE_EXPORT_NAME:
        /* write the export name request */
        if (nbd_send_option_request(ioc, NBD_OPT_EXPORT_NAME, -1, info->name,
                                    errp) < 0) {
            return -EINVAL;
        }

        /* Read the response */
        if (nbd_read64(ioc, &info->size, "export length", errp) < 0) {
            return -EINVAL;
        }
        if (nbd_read16(ioc, &info->flags, "export flags", errp) < 0) {
            return -EINVAL;
        }
        break;
    case NBD_MODE_OLDSTYLE:
        if (*info->name) {
            error_setg(errp, "Server does not support non-empty export names");
            return -EINVAL;
        }
        if (nbd_negotiate_finish_oldstyle(ioc, info, errp) < 0) {
            return -EINVAL;
        }
        break;
    default:
        g_assert_not_reached();
    }

    trace_nbd_receive_negotiate_size_flags(info->size, info->flags);
    if (zeroes && nbd_drop(ioc, 124, errp) < 0) {
        error_prepend(errp, "Failed to read reserved block: ");
        return -EINVAL;
    }
    return 0;
}
/* Clean up result of nbd_receive_export_list */
void nbd_free_export_list(NBDExportInfo *info, int count)
{
    int i, j;

    if (!info) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(info[i].name);
        g_free(info[i].description);
        for (j = 0; j < info[i].n_contexts; j++) {
            g_free(info[i].contexts[j]);
        }
        g_free(info[i].contexts);
    }
    g_free(info);
}
/*
 * nbd_receive_export_list:
 * Query details about a server's exports, then disconnect without
 * going into transmission phase. Return a count of the exports listed
 * in @info by the server, or -1 on error. Caller must free @info using
 * nbd_free_export_list().
 */
int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                            const char *hostname, NBDExportInfo **info,
                            Error **errp)
{
    int result;
    int count = 0;
    int i;
    int rc;
    int ret = -1;
    NBDExportInfo *array = NULL;
    QIOChannel *sioc = NULL;

    *info = NULL;
    result = nbd_start_negotiate(ioc, tlscreds, hostname, &sioc,
                                 NBD_MODE_EXTENDED, NULL, errp);
    if (tlscreds && sioc) {
        ioc = sioc;
    }
    if (result < 0) {
        goto out;
    }

    switch ((NBDMode)result) {
    case NBD_MODE_SIMPLE:
    case NBD_MODE_STRUCTURED:
    case NBD_MODE_EXTENDED:
        /* newstyle - use NBD_OPT_LIST to populate array, then try
         * NBD_OPT_INFO on each array member. If structured replies
         * are enabled, also try NBD_OPT_LIST_META_CONTEXT. */
        if (nbd_send_option_request(ioc, NBD_OPT_LIST, 0, NULL, errp) < 0) {
            goto out;
        }
        while (1) {
            char *name;
            char *desc;

            rc = nbd_receive_list(ioc, &name, &desc, errp);
            if (rc < 0) {
                goto out;
            } else if (rc == 0) {
                break;
            }
            array = g_renew(NBDExportInfo, array, ++count);
            memset(&array[count - 1], 0, sizeof(*array));
            array[count - 1].name = name;
            array[count - 1].description = desc;
            array[count - 1].mode = result;
        }

        for (i = 0; i < count; i++) {
            array[i].request_sizes = true;
            rc = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, &array[i], errp);
            if (rc < 0) {
                goto out;
            } else if (rc == 0) {
                /*
                 * Pointless to try rest of loop. If OPT_INFO doesn't work,
                 * it's unlikely that meta contexts work either
                 */
                break;
            }

            if (result >= NBD_MODE_STRUCTURED &&
                nbd_list_meta_contexts(ioc, &array[i], errp) < 0) {
                goto out;
            }
        }
        /* Send NBD_OPT_ABORT as a courtesy before hanging up */
        nbd_send_opt_abort(ioc);
        break;
    case NBD_MODE_EXPORT_NAME:
        error_setg(errp, "Server does not support export lists");
        /* We can't even send NBD_OPT_ABORT, so merely hang up */
        goto out;
    case NBD_MODE_OLDSTYLE:
        /* Lone export name is implied, but we can parse length and flags */
        array = g_new0(NBDExportInfo, 1);
        array->name = g_strdup("");
        array->mode = NBD_MODE_OLDSTYLE;
        count = 1;

        if (nbd_negotiate_finish_oldstyle(ioc, array, errp) < 0) {
            goto out;
        }

        /* Send NBD_CMD_DISC as a courtesy to the server, but ignore all
         * errors now that we have the information we wanted. */
        if (nbd_drop(ioc, 124, NULL) == 0) {
            NBDRequest request = { .type = NBD_CMD_DISC, .mode = result };

            nbd_send_request(ioc, &request);
        }
        break;
    default:
        g_assert_not_reached();
    }

    *info = array;
    array = NULL;
    ret = count;

 out:
    qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    qio_channel_close(ioc, NULL);
    object_unref(OBJECT(sioc));
    nbd_free_export_list(array, count);
    return ret;
}
#ifdef __linux__
int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info,
             Error **errp)
{
    unsigned long sector_size = MAX(BDRV_SECTOR_SIZE, info->min_block);
    unsigned long sectors = info->size / sector_size;

    /* FIXME: Once the kernel module is patched to honor block sizes,
     * and to advertise that fact to user space, we should update the
     * hand-off to the kernel to use any block sizes we learned. */
    assert(!info->request_sizes);
    if (info->size / sector_size != sectors) {
        error_setg(errp, "Export size %" PRIu64 " too large for 32-bit kernel",
                   info->size);
        return -E2BIG;
    }

    trace_nbd_init_set_socket();
    if (ioctl(fd, NBD_SET_SOCK, (unsigned long) sioc->fd) < 0) {
        int serrno = errno;
        error_setg(errp, "Failed to set NBD socket");
        return -serrno;
    }

    trace_nbd_init_set_block_size(sector_size);
    if (ioctl(fd, NBD_SET_BLKSIZE, sector_size) < 0) {
        int serrno = errno;
        error_setg(errp, "Failed setting NBD block size");
        return -serrno;
    }

    trace_nbd_init_set_size(sectors);
    if (info->size % sector_size) {
        trace_nbd_init_trailing_bytes(info->size % sector_size);
    }

    if (ioctl(fd, NBD_SET_SIZE_BLOCKS, sectors) < 0) {
        int serrno = errno;
        error_setg(errp, "Failed setting size (in blocks)");
        return -serrno;
    }
    if (ioctl(fd, NBD_SET_FLAGS, (unsigned long) info->flags) < 0) {
        if (errno == ENOTTY) {
            int read_only = (info->flags & NBD_FLAG_READ_ONLY) != 0;
            trace_nbd_init_set_readonly();

            if (ioctl(fd, BLKROSET, (unsigned long) &read_only) < 0) {
                int serrno = errno;
                error_setg(errp, "Failed setting read-only attribute");
                return -serrno;
            }
        } else {
            int serrno = errno;
            error_setg(errp, "Failed setting flags");
            return -serrno;
        }
    }

    trace_nbd_init_finish();

    return 0;
}
int nbd_client(int fd)
{
    int ret, serrno;

    trace_nbd_client_loop();
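
    /* NBD_DO_IT blocks in the kernel, servicing the device, until the
     * connection is torn down or NBD_DISCONNECT is issued. */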
    ret = ioctl(fd, NBD_DO_IT);
    if (ret < 0 && errno == EPIPE) {
        /* NBD_DO_IT normally returns EPIPE when someone has disconnected
         * the socket via NBD_DISCONNECT. We do not want to return 1 in
         * that case, because we actually requested the disconnection. */
        ret = 0;
    }
    serrno = errno;

    trace_nbd_client_loop_ret(ret, strerror(serrno));

    trace_nbd_client_clear_queue();
    ioctl(fd, NBD_CLEAR_QUE);

    trace_nbd_client_clear_socket();
    ioctl(fd, NBD_CLEAR_SOCK);

    errno = serrno;
    return ret;
}
int nbd_disconnect(int fd)
{
    ioctl(fd, NBD_CLEAR_QUE);
    ioctl(fd, NBD_DISCONNECT);
    ioctl(fd, NBD_CLEAR_SOCK);
    return 0;
}

#endif /* __linux__ */
int nbd_send_request(QIOChannel *ioc, NBDRequest *request)
{
    uint8_t buf[NBD_EXTENDED_REQUEST_SIZE];
    size_t len;
    trace_nbd_send_request(request->from, request->len, request->cookie,
                           request->flags, request->type,
                           nbd_cmd_lookup(request->type));
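
    /* Flags, type, cookie and offset occupy the same positions in both the
     * compact and extended request layouts; only the magic number and the
     * width of the length field differ. */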
    stw_be_p(buf + 4, request->flags);
    stw_be_p(buf + 6, request->type);
    stq_be_p(buf + 8, request->cookie);
    stq_be_p(buf + 16, request->from);
    if (request->mode >= NBD_MODE_EXTENDED) {
        stl_be_p(buf, NBD_EXTENDED_REQUEST_MAGIC);
        stq_be_p(buf + 24, request->len);
        len = NBD_EXTENDED_REQUEST_SIZE;
    } else {
        assert(request->len <= UINT32_MAX);
        stl_be_p(buf, NBD_REQUEST_MAGIC);
        stl_be_p(buf + 24, request->len);
        len = NBD_REQUEST_SIZE;
    }

    return nbd_write(ioc, buf, len, NULL);
}
/* nbd_receive_simple_reply
 * Read simple reply except magic field (which should be already read).
 * Payload is not read (payload is possible for CMD_READ, but here we don't
 * even know whether it takes place or not).
 */
static int nbd_receive_simple_reply(QIOChannel *ioc, NBDSimpleReply *reply,
                                    Error **errp)
{
    int ret;

    assert(reply->magic == NBD_SIMPLE_REPLY_MAGIC);

    ret = nbd_read(ioc, (uint8_t *)reply + sizeof(reply->magic),
                   sizeof(*reply) - sizeof(reply->magic), "reply", errp);
    if (ret < 0) {
        return ret;
    }

    reply->error = be32_to_cpu(reply->error);
    reply->cookie = be64_to_cpu(reply->cookie);

    return 0;
}
/* nbd_receive_reply_chunk_header
 * Read structured reply chunk except magic field (which should be already
 * read). Normalize into the compact form.
 * Payload is not read.
 */
static int nbd_receive_reply_chunk_header(QIOChannel *ioc, NBDReply *chunk,
                                          Error **errp)
{
    int ret;
    size_t len;
    uint64_t payload_len;

    if (chunk->magic == NBD_STRUCTURED_REPLY_MAGIC) {
        len = sizeof(chunk->structured);
    } else {
        assert(chunk->magic == NBD_EXTENDED_REPLY_MAGIC);
        len = sizeof(chunk->extended);
    }

    ret = nbd_read(ioc, (uint8_t *)chunk + sizeof(chunk->magic),
                   len - sizeof(chunk->magic), "structured chunk",
                   errp);
    if (ret < 0) {
        return ret;
    }

    /* flags, type, and cookie occupy same space between forms */
    chunk->structured.flags = be16_to_cpu(chunk->structured.flags);
    chunk->structured.type = be16_to_cpu(chunk->structured.type);
    chunk->structured.cookie = be64_to_cpu(chunk->structured.cookie);

    /*
     * Because we use BLOCK_STATUS with REQ_ONE, and cap READ requests
     * at 32M, no valid server should send us payload larger than
     * this. Even if we stopped using REQ_ONE, sane servers will cap
     * the number of extents they return for block status.
     */
    if (chunk->magic == NBD_STRUCTURED_REPLY_MAGIC) {
        payload_len = be32_to_cpu(chunk->structured.length);
    } else {
        /* For now, we are ignoring the extended header offset. */
        payload_len = be64_to_cpu(chunk->extended.length);
        chunk->magic = NBD_STRUCTURED_REPLY_MAGIC;
    }
    if (payload_len > NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData)) {
        error_setg(errp, "server chunk %" PRIu32 " (%s) payload is too long",
                   chunk->structured.type,
                   nbd_reply_type_lookup(chunk->structured.type));
        return -EINVAL;
    }
    chunk->structured.length = payload_len;

    return 0;
}
/*
 * Tries to read @size bytes from @ioc.
 * Returns 1 on success
 *         0 on eof, when no data was read (errp is not set)
 *         negative errno on failure (errp is set)
 */
static inline int coroutine_fn
nbd_read_eof(BlockDriverState *bs, QIOChannel *ioc, void *buffer, size_t size,
             Error **errp)
{
    bool partial = false;

    assert(size);
    while (size > 0) {
        struct iovec iov = { .iov_base = buffer, .iov_len = size };
        ssize_t len;

        len = qio_channel_readv(ioc, &iov, 1, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            qio_channel_yield(ioc, G_IO_IN);
            continue;
        } else if (len < 0) {
            return -EIO;
        } else if (len == 0) {
            if (partial) {
                error_setg(errp,
                           "Unexpected end-of-file before all bytes were read");
                return -EIO;
            }
            return 0;
        }

        partial = true;
        size -= len;
        buffer = (uint8_t *) buffer + len;
    }
    return 1;
}
/* nbd_receive_reply
 *
 * Wait for a new reply. If this yields, the coroutine must be able to be
 * safely reentered for nbd_client_attach_aio_context(). @mode determines
 * which reply magic we are expecting, although this normalizes the result
 * so that the caller only has to work with compact headers.
 *
 * Returns 1 on success
 *         0 on eof, when no data was read
 *         negative errno on failure
 */
int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
                                   NBDReply *reply, NBDMode mode, Error **errp)
{
    int ret;
    const char *type;
    uint32_t expected;

    ret = nbd_read_eof(bs, ioc, &reply->magic, sizeof(reply->magic), errp);
    if (ret <= 0) {
        return ret;
    }

    reply->magic = be32_to_cpu(reply->magic);
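
    /* Every reply style begins with a 32-bit magic, so it alone tells us
     * which header layout follows. */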
    /* Diagnose but accept wrong-width header */
    switch (reply->magic) {
    case NBD_SIMPLE_REPLY_MAGIC:
        if (mode >= NBD_MODE_EXTENDED) {
            trace_nbd_receive_wrong_header(reply->magic,
                                           nbd_mode_lookup(mode));
        }
        ret = nbd_receive_simple_reply(ioc, &reply->simple, errp);
        if (ret < 0) {
            return ret;
        }
        trace_nbd_receive_simple_reply(reply->simple.error,
                                       nbd_err_lookup(reply->simple.error),
                                       reply->simple.cookie);
        break;
    case NBD_STRUCTURED_REPLY_MAGIC:
    case NBD_EXTENDED_REPLY_MAGIC:
        expected = mode >= NBD_MODE_EXTENDED ? NBD_EXTENDED_REPLY_MAGIC
                                             : NBD_STRUCTURED_REPLY_MAGIC;
        if (reply->magic != expected) {
            trace_nbd_receive_wrong_header(reply->magic,
                                           nbd_mode_lookup(mode));
        }
        ret = nbd_receive_reply_chunk_header(ioc, reply, errp);
        if (ret < 0) {
            return ret;
        }
        type = nbd_reply_type_lookup(reply->structured.type);
        trace_nbd_receive_reply_chunk_header(reply->structured.flags,
                                             reply->structured.type, type,
                                             reply->structured.cookie,
                                             reply->structured.length);
        break;
    default:
        trace_nbd_receive_wrong_header(reply->magic, nbd_mode_lookup(mode));
        error_setg(errp, "invalid magic (got 0x%" PRIx32 ")", reply->magic);
        return -EINVAL;
    }

    return 1;
}