2 * Copyright (C) 2013-2019 Red Hat Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
11 * * Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of Red Hat nor the names of its contributors may be
16 * used to endorse or promote products derived from this software without
17 * specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
22 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
26 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/* Helpers for registering a new backend. */

/* Use:
 * -D nbdkit.backend.controlpath=0 to suppress control path debugging.
 * -D nbdkit.backend.datapath=0 to suppress data path debugging.
 */
int nbdkit_debug_backend_controlpath = 1;
int nbdkit_debug_backend_datapath = 1;

/* Wrap debug() so each class of message can be suppressed independently.
 * do { } while (0) makes the macros safe as a single statement (e.g. in
 * an unbraced if/else).
 */
#define controlpath_debug(fs, ...)                                      \
  do {                                                                  \
    if (nbdkit_debug_backend_controlpath) debug ((fs), ##__VA_ARGS__);  \
  } while (0)
#define datapath_debug(fs, ...)                                         \
  do {                                                                  \
    if (nbdkit_debug_backend_datapath) debug ((fs), ##__VA_ARGS__);     \
  } while (0)
66 backend_init (struct backend
*b
, struct backend
*next
, size_t index
,
67 const char *filename
, void *dl
, const char *type
)
72 b
->filename
= strdup (filename
);
73 if (b
->filename
== NULL
) {
79 debug ("registering %s %s", type
, filename
);
83 backend_load (struct backend
*b
, const char *name
, void (*load
) (void))
87 /* name is required. */
89 fprintf (stderr
, "%s: %s: %s must have a .name field\n",
90 program_name
, b
->filename
, b
->type
);
96 fprintf (stderr
, "%s: %s: %s.name field must not be empty\n",
97 program_name
, b
->filename
, b
->type
);
100 for (i
= 0; i
< len
; ++i
) {
101 unsigned char c
= name
[i
];
103 if (!(isascii (c
) && isalnum (c
))) {
105 "%s: %s: %s.name ('%s') field "
106 "must contain only ASCII alphanumeric characters\n",
107 program_name
, b
->filename
, b
->type
, name
);
112 /* Copy the module's name into local storage, so that name
113 * survives past unload.
115 b
->name
= strdup (name
);
116 if (b
->name
== NULL
) {
121 debug ("registered %s %s (name %s)", b
->type
, b
->filename
, b
->name
);
123 /* Apply debug flags before calling load. */
124 apply_debug_flags (b
->dl
, name
);
126 /* Call the on-load callback if it exists. */
127 controlpath_debug ("%s: load", name
);
133 backend_unload (struct backend
*b
, void (*unload
) (void))
135 /* Acquiring this lock prevents any other backend callbacks from running
140 controlpath_debug ("%s: unload %s", b
->name
, b
->type
);
154 backend_open (struct backend
*b
, struct connection
*conn
, int readonly
)
156 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
158 controlpath_debug ("%s: open readonly=%d", b
->name
, readonly
);
160 assert (h
->handle
== NULL
);
161 assert ((h
->state
& HANDLE_OPEN
) == 0);
162 assert (h
->can_write
== -1);
166 /* Most filters will call next_open first, resulting in
167 * inner-to-outer ordering.
169 h
->handle
= b
->open (b
, conn
, readonly
);
170 controlpath_debug ("%s: open returned handle %p", b
->name
, h
->handle
);
172 if (h
->handle
== NULL
) {
173 if (b
->i
) /* Do not strand backend if this layer failed */
174 backend_close (b
->next
, conn
);
178 h
->state
|= HANDLE_OPEN
;
179 if (b
->i
) /* A filter must not succeed unless its backend did also */
180 assert (conn
->handles
[b
->i
- 1].handle
);
185 backend_prepare (struct backend
*b
, struct connection
*conn
)
187 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
190 assert ((h
->state
& (HANDLE_OPEN
| HANDLE_CONNECTED
)) == HANDLE_OPEN
);
192 /* Call these in order starting from the filter closest to the
193 * plugin, similar to typical .open order.
195 if (b
->i
&& backend_prepare (b
->next
, conn
) == -1)
198 controlpath_debug ("%s: prepare readonly=%d", b
->name
, h
->can_write
== 0);
200 if (b
->prepare (b
, conn
, h
->handle
, h
->can_write
== 0) == -1)
202 h
->state
|= HANDLE_CONNECTED
;
207 backend_finalize (struct backend
*b
, struct connection
*conn
)
209 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
211 /* Call these in reverse order to .prepare above, starting from the
212 * filter furthest away from the plugin, and matching .close order.
215 controlpath_debug ("%s: finalize", b
->name
);
217 /* Once finalize fails, we can do nothing further on this connection */
218 if (h
->state
& HANDLE_FAILED
)
222 assert (h
->state
& HANDLE_CONNECTED
);
223 if (b
->finalize (b
, conn
, h
->handle
) == -1) {
224 h
->state
|= HANDLE_FAILED
;
229 assert (! (h
->state
& HANDLE_CONNECTED
));
232 return backend_finalize (b
->next
, conn
);
237 backend_close (struct backend
*b
, struct connection
*conn
)
239 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
241 /* outer-to-inner order, opposite .open */
242 controlpath_debug ("%s: close", b
->name
);
245 assert (h
->state
& HANDLE_OPEN
);
246 b
->close (b
, conn
, h
->handle
);
249 assert (! (h
->state
& HANDLE_OPEN
));
250 reset_b_conn_handle (h
);
252 backend_close (b
->next
, conn
);
256 backend_valid_range (struct backend
*b
, struct connection
*conn
,
257 uint64_t offset
, uint32_t count
)
259 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
261 assert (h
->exportsize
<= INT64_MAX
); /* Guaranteed by negotiation phase */
262 return count
> 0 && offset
<= h
->exportsize
&&
263 offset
+ count
<= h
->exportsize
;
266 /* Wrappers for all callbacks in a filter's struct nbdkit_next_ops. */
269 backend_reopen (struct backend
*b
, struct connection
*conn
, int readonly
)
271 controlpath_debug ("%s: reopen readonly=%d", b
->name
, readonly
);
273 if (backend_finalize (b
, conn
) == -1)
275 backend_close (b
, conn
);
276 if (backend_open (b
, conn
, readonly
) == -1) {
277 backend_close (b
, conn
);
280 if (backend_prepare (b
, conn
) == -1) {
281 backend_finalize (b
, conn
);
282 backend_close (b
, conn
);
289 backend_get_size (struct backend
*b
, struct connection
*conn
)
291 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
293 controlpath_debug ("%s: get_size", b
->name
);
295 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
296 if (h
->exportsize
== -1)
297 h
->exportsize
= b
->get_size (b
, conn
, h
->handle
);
298 return h
->exportsize
;
302 backend_can_write (struct backend
*b
, struct connection
*conn
)
304 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
306 controlpath_debug ("%s: can_write", b
->name
);
308 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
309 if (h
->can_write
== -1)
310 h
->can_write
= b
->can_write (b
, conn
, h
->handle
);
315 backend_can_flush (struct backend
*b
, struct connection
*conn
)
317 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
319 controlpath_debug ("%s: can_flush", b
->name
);
321 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
322 if (h
->can_flush
== -1)
323 h
->can_flush
= b
->can_flush (b
, conn
, h
->handle
);
328 backend_is_rotational (struct backend
*b
, struct connection
*conn
)
330 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
332 controlpath_debug ("%s: is_rotational", b
->name
);
334 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
335 if (h
->is_rotational
== -1)
336 h
->is_rotational
= b
->is_rotational (b
, conn
, h
->handle
);
337 return h
->is_rotational
;
341 backend_can_trim (struct backend
*b
, struct connection
*conn
)
343 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
346 controlpath_debug ("%s: can_trim", b
->name
);
348 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
349 if (h
->can_trim
== -1) {
350 r
= backend_can_write (b
, conn
);
355 h
->can_trim
= b
->can_trim (b
, conn
, h
->handle
);
361 backend_can_zero (struct backend
*b
, struct connection
*conn
)
363 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
366 controlpath_debug ("%s: can_zero", b
->name
);
368 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
369 if (h
->can_zero
== -1) {
370 r
= backend_can_write (b
, conn
);
372 h
->can_zero
= NBDKIT_ZERO_NONE
;
373 return r
; /* Relies on 0 == NBDKIT_ZERO_NONE */
375 h
->can_zero
= b
->can_zero (b
, conn
, h
->handle
);
381 backend_can_fast_zero (struct backend
*b
, struct connection
*conn
)
383 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
386 controlpath_debug ("%s: can_fast_zero", b
->name
);
388 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
389 if (h
->can_fast_zero
== -1) {
390 r
= backend_can_zero (b
, conn
);
391 if (r
< NBDKIT_ZERO_EMULATE
) {
392 h
->can_fast_zero
= 0;
393 return r
; /* Relies on 0 == NBDKIT_ZERO_NONE */
395 h
->can_fast_zero
= b
->can_fast_zero (b
, conn
, h
->handle
);
397 return h
->can_fast_zero
;
401 backend_can_extents (struct backend
*b
, struct connection
*conn
)
403 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
405 controlpath_debug ("%s: can_extents", b
->name
);
407 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
408 if (h
->can_extents
== -1)
409 h
->can_extents
= b
->can_extents (b
, conn
, h
->handle
);
410 return h
->can_extents
;
414 backend_can_fua (struct backend
*b
, struct connection
*conn
)
416 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
419 controlpath_debug ("%s: can_fua", b
->name
);
421 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
422 if (h
->can_fua
== -1) {
423 r
= backend_can_write (b
, conn
);
425 h
->can_fua
= NBDKIT_FUA_NONE
;
426 return r
; /* Relies on 0 == NBDKIT_FUA_NONE */
428 h
->can_fua
= b
->can_fua (b
, conn
, h
->handle
);
434 backend_can_multi_conn (struct backend
*b
, struct connection
*conn
)
436 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
438 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
439 controlpath_debug ("%s: can_multi_conn", b
->name
);
441 if (h
->can_multi_conn
== -1)
442 h
->can_multi_conn
= b
->can_multi_conn (b
, conn
, h
->handle
);
443 return h
->can_multi_conn
;
447 backend_can_cache (struct backend
*b
, struct connection
*conn
)
449 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
451 controlpath_debug ("%s: can_cache", b
->name
);
453 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
454 if (h
->can_cache
== -1)
455 h
->can_cache
= b
->can_cache (b
, conn
, h
->handle
);
460 backend_pread (struct backend
*b
, struct connection
*conn
,
461 void *buf
, uint32_t count
, uint64_t offset
,
462 uint32_t flags
, int *err
)
464 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
467 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
468 assert (backend_valid_range (b
, conn
, offset
, count
));
470 datapath_debug ("%s: pread count=%" PRIu32
" offset=%" PRIu64
,
471 b
->name
, count
, offset
);
473 r
= b
->pread (b
, conn
, h
->handle
, buf
, count
, offset
, flags
, err
);
480 backend_pwrite (struct backend
*b
, struct connection
*conn
,
481 const void *buf
, uint32_t count
, uint64_t offset
,
482 uint32_t flags
, int *err
)
484 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
485 bool fua
= !!(flags
& NBDKIT_FLAG_FUA
);
488 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
489 assert (h
->can_write
== 1);
490 assert (backend_valid_range (b
, conn
, offset
, count
));
491 assert (!(flags
& ~NBDKIT_FLAG_FUA
));
493 assert (h
->can_fua
> NBDKIT_FUA_NONE
);
494 datapath_debug ("%s: pwrite count=%" PRIu32
" offset=%" PRIu64
" fua=%d",
495 b
->name
, count
, offset
, fua
);
497 r
= b
->pwrite (b
, conn
, h
->handle
, buf
, count
, offset
, flags
, err
);
504 backend_flush (struct backend
*b
, struct connection
*conn
,
505 uint32_t flags
, int *err
)
507 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
510 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
511 assert (h
->can_flush
== 1);
513 datapath_debug ("%s: flush", b
->name
);
515 r
= b
->flush (b
, conn
, h
->handle
, flags
, err
);
522 backend_trim (struct backend
*b
, struct connection
*conn
,
523 uint32_t count
, uint64_t offset
, uint32_t flags
,
526 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
527 bool fua
= !!(flags
& NBDKIT_FLAG_FUA
);
530 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
531 assert (h
->can_write
== 1);
532 assert (h
->can_trim
== 1);
533 assert (backend_valid_range (b
, conn
, offset
, count
));
534 assert (!(flags
& ~NBDKIT_FLAG_FUA
));
536 assert (h
->can_fua
> NBDKIT_FUA_NONE
);
537 datapath_debug ("%s: trim count=%" PRIu32
" offset=%" PRIu64
" fua=%d",
538 b
->name
, count
, offset
, fua
);
540 r
= b
->trim (b
, conn
, h
->handle
, count
, offset
, flags
, err
);
547 backend_zero (struct backend
*b
, struct connection
*conn
,
548 uint32_t count
, uint64_t offset
, uint32_t flags
,
551 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
552 bool fua
= !!(flags
& NBDKIT_FLAG_FUA
);
553 bool fast
= !!(flags
& NBDKIT_FLAG_FAST_ZERO
);
556 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
557 assert (h
->can_write
== 1);
558 assert (h
->can_zero
> NBDKIT_ZERO_NONE
);
559 assert (backend_valid_range (b
, conn
, offset
, count
));
560 assert (!(flags
& ~(NBDKIT_FLAG_MAY_TRIM
| NBDKIT_FLAG_FUA
|
561 NBDKIT_FLAG_FAST_ZERO
)));
563 assert (h
->can_fua
> NBDKIT_FUA_NONE
);
565 assert (h
->can_fast_zero
== 1);
566 datapath_debug ("%s: zero count=%" PRIu32
" offset=%" PRIu64
567 " may_trim=%d fua=%d fast=%d",
568 b
->name
, count
, offset
,
569 !!(flags
& NBDKIT_FLAG_MAY_TRIM
), fua
, fast
);
571 r
= b
->zero (b
, conn
, h
->handle
, count
, offset
, flags
, err
);
575 assert (*err
!= ENOTSUP
&& *err
!= EOPNOTSUPP
);
581 backend_extents (struct backend
*b
, struct connection
*conn
,
582 uint32_t count
, uint64_t offset
, uint32_t flags
,
583 struct nbdkit_extents
*extents
, int *err
)
585 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
588 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
589 assert (h
->can_extents
>= 0);
590 assert (backend_valid_range (b
, conn
, offset
, count
));
591 assert (!(flags
& ~NBDKIT_FLAG_REQ_ONE
));
592 datapath_debug ("%s: extents count=%" PRIu32
" offset=%" PRIu64
" req_one=%d",
593 b
->name
, count
, offset
, !!(flags
& NBDKIT_FLAG_REQ_ONE
));
595 if (h
->can_extents
== 0) {
596 /* By default it is safe assume that everything in the range is
599 r
= nbdkit_add_extent (extents
, offset
, count
, 0 /* allocated data */);
604 r
= b
->extents (b
, conn
, h
->handle
, count
, offset
, flags
, extents
, err
);
611 backend_cache (struct backend
*b
, struct connection
*conn
,
612 uint32_t count
, uint64_t offset
,
613 uint32_t flags
, int *err
)
615 struct b_conn_handle
*h
= &conn
->handles
[b
->i
];
618 assert (h
->handle
&& (h
->state
& HANDLE_CONNECTED
));
619 assert (h
->can_cache
> NBDKIT_CACHE_NONE
);
620 assert (backend_valid_range (b
, conn
, offset
, count
));
622 datapath_debug ("%s: cache count=%" PRIu32
" offset=%" PRIu64
,
623 b
->name
, count
, offset
);
625 if (h
->can_cache
== NBDKIT_CACHE_EMULATE
) {
626 static char buf
[MAX_REQUEST_SIZE
]; /* data sink, never read */
630 limit
= MIN (count
, sizeof buf
);
631 if (backend_pread (b
, conn
, buf
, limit
, offset
, flags
, err
) == -1)
637 r
= b
->cache (b
, conn
, h
->handle
, count
, offset
, flags
, err
);