/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "nbd.h"
#include "block_int.h"
#include "module.h"
#include "qemu_socket.h"

#include <sys/types.h>

#define EN_OPTSTR ":exportname="
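
/*
 * Filename syntax accepted by nbd_config() below (illustrative, derived from
 * the parsing code rather than from separate documentation):
 *
 *   nbd:<hostname|ip4|[ip6]>:<port>[:exportname=<export>]
 *   nbd:unix:</absolute/path/to/socket>[:exportname=<export>]
 */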

/* #define DEBUG_NBD */

#if defined(DEBUG_NBD)
#define logout(fmt, ...) \
    fprintf(stderr, "nbd\t%-24s" fmt, __func__, ##__VA_ARGS__)
#else
#define logout(fmt, ...) ((void)0)
#endif

#define MAX_NBD_REQUESTS    16
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ ((uint64_t)(intptr_t)bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ ((uint64_t)(intptr_t)bs))
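
/*
 * The handle sent to the server encodes the index of a slot in
 * recv_coroutine[] XORed with the BDRVNBDState pointer.  XOR is its own
 * inverse, so HANDLE_TO_INDEX() recovers the slot from the handle echoed
 * back in the server's reply, letting nbd_reply_ready() wake exactly the
 * coroutine that issued the request.
 */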

typedef struct BDRVNBDState {
    int sock;
    uint32_t nbdflags;
    off_t size;
    size_t blocksize;
    char *export_name; /* An NBD server may export several devices */

    CoMutex send_mutex;
    CoMutex free_sema;
    Coroutine *send_coroutine;
    int in_flight;

    Coroutine *recv_coroutine[MAX_NBD_REQUESTS];
    struct nbd_reply reply;

    /* If it begins with '/', this is a UNIX domain socket. Otherwise,
     * it's a string of the form <hostname|ip4|\[ip6\]>:port
     */
    char *host_spec;
} BDRVNBDState;

static int nbd_config(BDRVNBDState *s, const char *filename, int flags)
{
    char *file;
    char *export_name;
    const char *host_spec;
    const char *unixpath;
    int err = -EINVAL;

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            /* empty export name */
            goto out;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);
        s->export_name = g_strdup(export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        goto out;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        if (unixpath[0] != '/') { /* We demand an absolute path */
            goto out;
        }
        s->host_spec = g_strdup(unixpath);
    } else {
        s->host_spec = g_strdup(host_spec);
    }

    err = 0;

out:
    g_free(file);
    if (err != 0) {
        g_free(s->export_name);
        g_free(s->host_spec);
    }
    return err;
}

static void nbd_coroutine_start(BDRVNBDState *s, struct nbd_request *request)
{
    int i;

    /* Poor man's semaphore.  The free_sema is locked when no other request
     * can be accepted, and unlocked after receiving one reply. */
    if (s->in_flight >= MAX_NBD_REQUESTS - 1) {
        qemu_co_mutex_lock(&s->free_sema);
        assert(s->in_flight < MAX_NBD_REQUESTS);
    }
    s->in_flight++;

    /* Claim a free slot; its index becomes part of the request handle. */
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i] == NULL) {
            s->recv_coroutine[i] = qemu_coroutine_self();
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    request->handle = INDEX_TO_HANDLE(s, i);
}

static int nbd_have_request(void *opaque)
{
    BDRVNBDState *s = opaque;

    return s->in_flight > 0;
}

static void nbd_reply_ready(void *opaque)
{
    BDRVNBDState *s = opaque;
    uint64_t i;

    if (s->reply.handle == 0) {
        /* No reply already in flight.  Fetch a header. */
        if (nbd_receive_reply(s->sock, &s->reply) < 0) {
            s->reply.handle = 0;
            goto fail;
        }
    }

    /* There's no need for a mutex on the receive side, because the
     * handler acts as a synchronization point and ensures that only
     * one coroutine is called until the reply finishes. */
    i = HANDLE_TO_INDEX(s, s->reply.handle);
    if (s->recv_coroutine[i]) {
        qemu_coroutine_enter(s->recv_coroutine[i], NULL);
        return;
    }

fail:
    /* On error, wake every waiting coroutine so each can bail out. */
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i]) {
            qemu_coroutine_enter(s->recv_coroutine[i], NULL);
        }
    }
}

static void nbd_restart_write(void *opaque)
{
    BDRVNBDState *s = opaque;

    qemu_coroutine_enter(s->send_coroutine, NULL);
}

static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
                               struct iovec *iov, int offset)
{
    int rc, ret;

    qemu_co_mutex_lock(&s->send_mutex);
    s->send_coroutine = qemu_coroutine_self();
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
                            nbd_have_request, NULL, s);
    rc = nbd_send_request(s->sock, request);
    if (rc != -1 && iov) {
        ret = qemu_co_sendv(s->sock, iov, request->len, offset);
        if (ret != request->len) {
            rc = -1;
        }
    }
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
                            nbd_have_request, NULL, s);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
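
/*
 * Note: the nbd_restart_write handler is installed only for the duration of
 * the send above, so that if the payload cannot be written in one go the
 * sending coroutine can yield and be re-entered from nbd_restart_write()
 * once the socket becomes writable again.
 */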

static void nbd_co_receive_reply(BDRVNBDState *s, struct nbd_request *request,
                                 struct nbd_reply *reply,
                                 struct iovec *iov, int offset)
{
    int ret;

    /* Wait until we're woken up by the read handler.  TODO: perhaps
     * peek at the next reply and avoid yielding if it's ours? */
    qemu_coroutine_yield();
    *reply = s->reply;
    if (reply->handle != request->handle) {
        reply->error = EIO;
    } else {
        if (iov && reply->error == 0) {
            ret = qemu_co_recvv(s->sock, iov, request->len, offset);
            if (ret != request->len) {
                reply->error = EIO;
            }
        }

        /* Tell the read handler to read another header. */
        s->reply.handle = 0;
    }
}

static void nbd_coroutine_end(BDRVNBDState *s, struct nbd_request *request)
{
    int i = HANDLE_TO_INDEX(s, request->handle);

    s->recv_coroutine[i] = NULL;
    if (s->in_flight-- == MAX_NBD_REQUESTS) {
        qemu_co_mutex_unlock(&s->free_sema);
    }
}

static int nbd_establish_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    int sock;
    int ret;
    off_t size;
    size_t blocksize;

    if (s->host_spec[0] == '/') {
        sock = unix_socket_outgoing(s->host_spec);
    } else {
        sock = tcp_socket_outgoing_spec(s->host_spec);
    }

    /* Failed to establish connection */
    if (sock == -1) {
        logout("Failed to establish connection to NBD server\n");
        return -errno;
    }

    /* NBD handshake */
    ret = nbd_receive_negotiate(sock, s->export_name, &s->nbdflags, &size,
                                &blocksize);
    if (ret == -1) {
        logout("Failed to negotiate with the NBD server\n");
        closesocket(sock);
        return -errno;
    }

    /* Now that we're connected, set the socket to be non-blocking and
     * kick the reply mechanism. */
    socket_set_nonblock(sock);
    s->sock = sock;
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
                            nbd_have_request, NULL, s);

    s->size = size;
    s->blocksize = blocksize;

    logout("Established connection with NBD server\n");
    return 0;
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;

    request.type = NBD_CMD_DISC;
    request.from = 0;
    request.len = 0;
    nbd_send_request(s->sock, &request);

    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL, NULL);
    closesocket(s->sock);
}

static int nbd_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVNBDState *s = bs->opaque;
    int result;

    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_mutex_init(&s->free_sema);

    /* Pop the config into our state object.  Exit if invalid. */
    result = nbd_config(s, filename, flags);
    if (result != 0) {
        return result;
    }

    /* establish TCP connection, return error if it fails
     * TODO: Configurable retry-until-timeout behaviour.
     */
    result = nbd_establish_connection(bs);

    return result;
}

static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors, QEMUIOVector *qiov,
                          int offset)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;

    request.type = NBD_CMD_READ;
    request.from = sector_num * 512;
    request.len = nb_sectors * 512;

    nbd_coroutine_start(s, &request);
    if (nbd_co_send_request(s, &request, NULL, 0) == -1) {
        reply.error = errno;
    } else {
        nbd_co_receive_reply(s, &request, &reply, qiov->iov, offset);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}

static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
                           int nb_sectors, QEMUIOVector *qiov,
                           int offset)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;

    request.type = NBD_CMD_WRITE;
    if (!bdrv_enable_write_cache(bs) && (s->nbdflags & NBD_FLAG_SEND_FUA)) {
        /* Writethrough mode: ask the server to commit each write to stable
         * storage by setting the FUA (force unit access) flag. */
        request.type |= NBD_CMD_FLAG_FUA;
    }

    request.from = sector_num * 512;
    request.len = nb_sectors * 512;

    nbd_coroutine_start(s, &request);
    if (nbd_co_send_request(s, &request, qiov->iov, offset) == -1) {
        reply.error = errno;
    } else {
        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}

/* qemu-nbd has a limit of slightly less than 1M per request.  Try to
 * remain aligned to 4K. */
#define NBD_MAX_SECTORS 2040
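/*
 * 2040 sectors * 512 bytes = 1044480 bytes = 255 * 4096, i.e. just under
 * 1 MiB while staying a multiple of 4 KiB.  A larger request, say 4096
 * sectors, is therefore issued as 2040 + 2040 + 16 sectors by the
 * splitting loops below.
 */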

static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
                        int nb_sectors, QEMUIOVector *qiov)
{
    int offset = 0;
    int ret;

    while (nb_sectors > NBD_MAX_SECTORS) {
        ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
        if (ret < 0) {
            return ret;
        }
        offset += NBD_MAX_SECTORS * 512;
        sector_num += NBD_MAX_SECTORS;
        nb_sectors -= NBD_MAX_SECTORS;
    }
    return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
}

static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
                         int nb_sectors, QEMUIOVector *qiov)
{
    int offset = 0;
    int ret;

    while (nb_sectors > NBD_MAX_SECTORS) {
        ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
        if (ret < 0) {
            return ret;
        }
        offset += NBD_MAX_SECTORS * 512;
        sector_num += NBD_MAX_SECTORS;
        nb_sectors -= NBD_MAX_SECTORS;
    }
    return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset);
}

static int nbd_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;

    if (!(s->nbdflags & NBD_FLAG_SEND_FLUSH)) {
        /* Server does not support flush; treat it as a no-op. */
        return 0;
    }

    request.type = NBD_CMD_FLUSH;
    if (s->nbdflags & NBD_FLAG_SEND_FUA) {
        request.type |= NBD_CMD_FLAG_FUA;
    }

    request.from = 0;
    request.len = 0;

    nbd_coroutine_start(s, &request);
    if (nbd_co_send_request(s, &request, NULL, 0) == -1) {
        reply.error = errno;
    } else {
        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}

static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;

    if (!(s->nbdflags & NBD_FLAG_SEND_TRIM)) {
        /* Server does not support trim; silently succeed. */
        return 0;
    }

    request.type = NBD_CMD_TRIM;
    request.from = sector_num * 512;
    request.len = nb_sectors * 512;

    nbd_coroutine_start(s, &request);
    if (nbd_co_send_request(s, &request, NULL, 0) == -1) {
        reply.error = errno;
    } else {
        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}

static void nbd_close(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    g_free(s->export_name);
    g_free(s->host_spec);

    nbd_teardown_connection(bs);
}

static int64_t nbd_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->size;
}

static BlockDriver bdrv_nbd = {
    .format_name         = "nbd",
    .instance_size       = sizeof(BDRVNBDState),
    .bdrv_file_open      = nbd_open,
    .bdrv_co_readv       = nbd_co_readv,
    .bdrv_co_writev      = nbd_co_writev,
    .bdrv_close          = nbd_close,
    .bdrv_co_flush_to_os = nbd_co_flush,
    .bdrv_co_discard     = nbd_co_discard,
    .bdrv_getlength      = nbd_getlength,
    .protocol_name       = "nbd",
};

static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
}

block_init(bdrv_nbd_init);
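
/*
 * Illustrative usage (not part of this file): with an NBD server exporting a
 * disk on, say, localhost port 1234, the protocol driver registered above is
 * selected by the "nbd:" prefix, e.g.
 *
 *   qemu-io -c 'read 0 512' nbd:localhost:1234
 *   qemu-system-x86_64 -drive file=nbd:localhost:1234:exportname=disk0
 *
 * The host/port/exportname forms shown are the ones accepted by nbd_config().
 */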