/* nbdkit
 * Copyright (C) 2013-2020 Red Hat Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of Red Hat nor the names of its contributors may be
 * used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL RED HAT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
44 /* We extend the generic backend struct with extra fields relating
47 struct backend_filter
{
48 struct backend backend
;
49 struct nbdkit_filter filter
;
/* Literally a backend + a connection pointer.  This is the
 * implementation of 'void *nxdata' in the filter API.
 */
struct b_conn {
  struct backend *b;       /* The next backend in the chain. */
  struct connection *conn; /* The connection being served. */
};
60 /* Note this frees the whole chain. */
62 filter_free (struct backend
*b
)
64 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
66 b
->next
->free (b
->next
);
68 backend_unload (b
, f
->filter
.unload
);
73 filter_thread_model (struct backend
*b
)
75 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
76 int filter_thread_model
= NBDKIT_THREAD_MODEL_PARALLEL
;
77 int thread_model
= b
->next
->thread_model (b
->next
);
79 if (f
->filter
.thread_model
) {
80 filter_thread_model
= f
->filter
.thread_model ();
81 if (filter_thread_model
== -1)
85 if (filter_thread_model
< thread_model
) /* more serialized */
86 thread_model
= filter_thread_model
;
91 /* This is actually passing the request through to the final plugin,
92 * hence the function name.
95 plugin_name (struct backend
*b
)
97 return b
->next
->plugin_name (b
->next
);
101 filter_version (struct backend
*b
)
103 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
105 return f
->filter
._version
;
109 filter_usage (struct backend
*b
)
111 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
114 printf ("filter: %s", b
->name
);
115 if (f
->filter
.longname
)
116 printf (" (%s)", f
->filter
.longname
);
118 printf ("(%s)\n", b
->filename
);
119 if (f
->filter
.description
) {
120 printf ("%s", f
->filter
.description
);
121 if ((p
= strrchr (f
->filter
.description
, '\n')) == NULL
|| p
[1])
124 if (f
->filter
.config_help
) {
125 printf ("%s", f
->filter
.config_help
);
126 if ((p
= strrchr (f
->filter
.config_help
, '\n')) == NULL
|| p
[1])
132 filter_dump_fields (struct backend
*b
)
134 b
->next
->dump_fields (b
->next
);
138 next_config (void *nxdata
, const char *key
, const char *value
)
140 struct backend
*b
= nxdata
;
141 b
->config (b
, key
, value
);
146 filter_config (struct backend
*b
, const char *key
, const char *value
)
148 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
150 debug ("%s: config key=%s, value=%s",
151 b
->name
, key
, value
);
153 if (f
->filter
.config
) {
154 if (f
->filter
.config (next_config
, b
->next
, key
, value
) == -1)
158 b
->next
->config (b
->next
, key
, value
);
162 next_config_complete (void *nxdata
)
164 struct backend
*b
= nxdata
;
165 b
->config_complete (b
);
170 filter_config_complete (struct backend
*b
)
172 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
174 debug ("%s: config_complete", b
->name
);
176 if (f
->filter
.config_complete
) {
177 if (f
->filter
.config_complete (next_config_complete
, b
->next
) == -1)
181 b
->next
->config_complete (b
->next
);
185 next_preconnect (void *nxdata
, int readonly
)
187 struct b_conn
*b_conn
= nxdata
;
188 return b_conn
->b
->preconnect (b_conn
->b
, b_conn
->conn
, readonly
);
192 filter_preconnect (struct backend
*b
, struct connection
*conn
, int readonly
)
194 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
195 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
197 debug ("%s: preconnect", b
->name
);
199 if (f
->filter
.preconnect
)
200 return f
->filter
.preconnect (next_preconnect
, &nxdata
, readonly
);
202 return b
->next
->preconnect (b
->next
, conn
, readonly
);
205 /* magic_config_key only applies to plugins, so this passes the
206 * request through to the plugin (hence the name).
209 plugin_magic_config_key (struct backend
*b
)
211 return b
->next
->magic_config_key (b
->next
);
215 next_open (void *nxdata
, int readonly
)
217 struct b_conn
*b_conn
= nxdata
;
219 return backend_open (b_conn
->b
, b_conn
->conn
, readonly
);
223 filter_open (struct backend
*b
, struct connection
*conn
, int readonly
)
225 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
226 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
228 /* Most filters will call next_open first, resulting in
229 * inner-to-outer ordering.
232 return f
->filter
.open (next_open
, &nxdata
, readonly
);
233 else if (backend_open (b
->next
, conn
, readonly
) == -1)
236 return NBDKIT_HANDLE_NOT_NEEDED
;
240 filter_close (struct backend
*b
, struct connection
*conn
, void *handle
)
242 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
244 if (handle
&& f
->filter
.close
)
245 f
->filter
.close (handle
);
248 /* The next_functions structure contains pointers to backend
249 * functions. However because these functions are all expecting a
250 * backend and a connection, we cannot call them directly, but must
251 * write some next_* functions that unpack the two parameters from a
252 * single ‘void *nxdata’ struct pointer (‘b_conn’).
256 next_reopen (void *nxdata
, int readonly
)
258 struct b_conn
*b_conn
= nxdata
;
259 return backend_reopen (b_conn
->b
, b_conn
->conn
, readonly
);
263 next_get_size (void *nxdata
)
265 struct b_conn
*b_conn
= nxdata
;
266 return backend_get_size (b_conn
->b
, b_conn
->conn
);
270 next_can_write (void *nxdata
)
272 struct b_conn
*b_conn
= nxdata
;
273 return backend_can_write (b_conn
->b
, b_conn
->conn
);
277 next_can_flush (void *nxdata
)
279 struct b_conn
*b_conn
= nxdata
;
280 return backend_can_flush (b_conn
->b
, b_conn
->conn
);
284 next_is_rotational (void *nxdata
)
286 struct b_conn
*b_conn
= nxdata
;
287 return backend_is_rotational (b_conn
->b
, b_conn
->conn
);
291 next_can_trim (void *nxdata
)
293 struct b_conn
*b_conn
= nxdata
;
294 return backend_can_trim (b_conn
->b
, b_conn
->conn
);
298 next_can_zero (void *nxdata
)
300 struct b_conn
*b_conn
= nxdata
;
301 return backend_can_zero (b_conn
->b
, b_conn
->conn
);
305 next_can_fast_zero (void *nxdata
)
307 struct b_conn
*b_conn
= nxdata
;
308 return backend_can_fast_zero (b_conn
->b
, b_conn
->conn
);
312 next_can_extents (void *nxdata
)
314 struct b_conn
*b_conn
= nxdata
;
315 return backend_can_extents (b_conn
->b
, b_conn
->conn
);
319 next_can_fua (void *nxdata
)
321 struct b_conn
*b_conn
= nxdata
;
322 return backend_can_fua (b_conn
->b
, b_conn
->conn
);
326 next_can_multi_conn (void *nxdata
)
328 struct b_conn
*b_conn
= nxdata
;
329 return backend_can_multi_conn (b_conn
->b
, b_conn
->conn
);
333 next_can_cache (void *nxdata
)
335 struct b_conn
*b_conn
= nxdata
;
336 return backend_can_cache (b_conn
->b
, b_conn
->conn
);
340 next_init_sparse (void *nxdata
)
342 struct b_conn
*b_conn
= nxdata
;
343 return backend_init_sparse (b_conn
->b
, b_conn
->conn
);
347 next_init_zero (void *nxdata
)
349 struct b_conn
*b_conn
= nxdata
;
350 return backend_init_zero (b_conn
->b
, b_conn
->conn
);
354 next_pread (void *nxdata
, void *buf
, uint32_t count
, uint64_t offset
,
355 uint32_t flags
, int *err
)
357 struct b_conn
*b_conn
= nxdata
;
358 return backend_pread (b_conn
->b
, b_conn
->conn
, buf
, count
, offset
, flags
,
363 next_pwrite (void *nxdata
, const void *buf
, uint32_t count
, uint64_t offset
,
364 uint32_t flags
, int *err
)
366 struct b_conn
*b_conn
= nxdata
;
367 return backend_pwrite (b_conn
->b
, b_conn
->conn
, buf
, count
, offset
, flags
,
372 next_flush (void *nxdata
, uint32_t flags
, int *err
)
374 struct b_conn
*b_conn
= nxdata
;
375 return backend_flush (b_conn
->b
, b_conn
->conn
, flags
, err
);
379 next_trim (void *nxdata
, uint32_t count
, uint64_t offset
, uint32_t flags
,
382 struct b_conn
*b_conn
= nxdata
;
383 return backend_trim (b_conn
->b
, b_conn
->conn
, count
, offset
, flags
, err
);
387 next_zero (void *nxdata
, uint32_t count
, uint64_t offset
, uint32_t flags
,
390 struct b_conn
*b_conn
= nxdata
;
391 return backend_zero (b_conn
->b
, b_conn
->conn
, count
, offset
, flags
, err
);
395 next_extents (void *nxdata
, uint32_t count
, uint64_t offset
, uint32_t flags
,
396 struct nbdkit_extents
*extents
, int *err
)
398 struct b_conn
*b_conn
= nxdata
;
399 return backend_extents (b_conn
->b
, b_conn
->conn
, count
, offset
, flags
,
404 next_cache (void *nxdata
, uint32_t count
, uint64_t offset
,
405 uint32_t flags
, int *err
)
407 struct b_conn
*b_conn
= nxdata
;
408 return backend_cache (b_conn
->b
, b_conn
->conn
, count
, offset
, flags
, err
);
411 static struct nbdkit_next_ops next_ops
= {
412 .reopen
= next_reopen
,
413 .get_size
= next_get_size
,
414 .can_write
= next_can_write
,
415 .can_flush
= next_can_flush
,
416 .is_rotational
= next_is_rotational
,
417 .can_trim
= next_can_trim
,
418 .can_zero
= next_can_zero
,
419 .can_fast_zero
= next_can_fast_zero
,
420 .can_extents
= next_can_extents
,
421 .can_fua
= next_can_fua
,
422 .can_multi_conn
= next_can_multi_conn
,
423 .can_cache
= next_can_cache
,
424 .init_sparse
= next_init_sparse
,
425 .init_zero
= next_init_zero
,
427 .pwrite
= next_pwrite
,
431 .extents
= next_extents
,
436 filter_prepare (struct backend
*b
, struct connection
*conn
, void *handle
,
439 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
440 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
442 if (f
->filter
.prepare
&&
443 f
->filter
.prepare (&next_ops
, &nxdata
, handle
, readonly
) == -1)
450 filter_finalize (struct backend
*b
, struct connection
*conn
, void *handle
)
452 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
453 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
455 if (f
->filter
.finalize
&&
456 f
->filter
.finalize (&next_ops
, &nxdata
, handle
) == -1)
462 filter_get_size (struct backend
*b
, struct connection
*conn
, void *handle
)
464 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
465 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
467 if (f
->filter
.get_size
)
468 return f
->filter
.get_size (&next_ops
, &nxdata
, handle
);
470 return backend_get_size (b
->next
, conn
);
474 filter_can_write (struct backend
*b
, struct connection
*conn
, void *handle
)
476 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
477 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
479 if (f
->filter
.can_write
)
480 return f
->filter
.can_write (&next_ops
, &nxdata
, handle
);
482 return backend_can_write (b
->next
, conn
);
486 filter_can_flush (struct backend
*b
, struct connection
*conn
, void *handle
)
488 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
489 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
491 if (f
->filter
.can_flush
)
492 return f
->filter
.can_flush (&next_ops
, &nxdata
, handle
);
494 return backend_can_flush (b
->next
, conn
);
498 filter_is_rotational (struct backend
*b
, struct connection
*conn
, void *handle
)
500 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
501 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
503 if (f
->filter
.is_rotational
)
504 return f
->filter
.is_rotational (&next_ops
, &nxdata
, handle
);
506 return backend_is_rotational (b
->next
, conn
);
510 filter_can_trim (struct backend
*b
, struct connection
*conn
, void *handle
)
512 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
513 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
515 if (f
->filter
.can_trim
)
516 return f
->filter
.can_trim (&next_ops
, &nxdata
, handle
);
518 return backend_can_trim (b
->next
, conn
);
522 filter_can_zero (struct backend
*b
, struct connection
*conn
, void *handle
)
524 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
525 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
527 if (f
->filter
.can_zero
)
528 return f
->filter
.can_zero (&next_ops
, &nxdata
, handle
);
530 return backend_can_zero (b
->next
, conn
);
534 filter_can_fast_zero (struct backend
*b
, struct connection
*conn
, void *handle
)
536 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
537 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
539 if (f
->filter
.can_fast_zero
)
540 return f
->filter
.can_fast_zero (&next_ops
, &nxdata
, handle
);
542 return backend_can_fast_zero (b
->next
, conn
);
546 filter_can_extents (struct backend
*b
, struct connection
*conn
, void *handle
)
548 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
549 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
551 if (f
->filter
.can_extents
)
552 return f
->filter
.can_extents (&next_ops
, &nxdata
, handle
);
554 return backend_can_extents (b
->next
, conn
);
558 filter_can_fua (struct backend
*b
, struct connection
*conn
, void *handle
)
560 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
561 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
563 if (f
->filter
.can_fua
)
564 return f
->filter
.can_fua (&next_ops
, &nxdata
, handle
);
566 return backend_can_fua (b
->next
, conn
);
570 filter_can_multi_conn (struct backend
*b
, struct connection
*conn
, void *handle
)
572 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
573 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
575 if (f
->filter
.can_multi_conn
)
576 return f
->filter
.can_multi_conn (&next_ops
, &nxdata
, handle
);
578 return backend_can_multi_conn (b
->next
, conn
);
582 filter_can_cache (struct backend
*b
, struct connection
*conn
, void *handle
)
584 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
585 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
587 if (f
->filter
.can_cache
)
588 return f
->filter
.can_cache (&next_ops
, &nxdata
, handle
);
590 return backend_can_cache (b
->next
, conn
);
594 filter_init_sparse (struct backend
*b
, struct connection
*conn
, void *handle
)
596 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
597 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
599 if (f
->filter
.init_sparse
)
600 return f
->filter
.init_sparse (&next_ops
, &nxdata
, handle
);
602 return backend_init_sparse (b
->next
, conn
);
606 filter_init_zero (struct backend
*b
, struct connection
*conn
, void *handle
)
608 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
609 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
611 if (f
->filter
.init_zero
)
612 return f
->filter
.init_zero (&next_ops
, &nxdata
, handle
);
614 return backend_init_zero (b
->next
, conn
);
618 filter_pread (struct backend
*b
, struct connection
*conn
, void *handle
,
619 void *buf
, uint32_t count
, uint64_t offset
,
620 uint32_t flags
, int *err
)
622 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
623 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
626 return f
->filter
.pread (&next_ops
, &nxdata
, handle
,
627 buf
, count
, offset
, flags
, err
);
629 return backend_pread (b
->next
, conn
, buf
, count
, offset
, flags
, err
);
633 filter_pwrite (struct backend
*b
, struct connection
*conn
, void *handle
,
634 const void *buf
, uint32_t count
, uint64_t offset
,
635 uint32_t flags
, int *err
)
637 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
638 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
640 if (f
->filter
.pwrite
)
641 return f
->filter
.pwrite (&next_ops
, &nxdata
, handle
,
642 buf
, count
, offset
, flags
, err
);
644 return backend_pwrite (b
->next
, conn
, buf
, count
, offset
, flags
, err
);
648 filter_flush (struct backend
*b
, struct connection
*conn
, void *handle
,
649 uint32_t flags
, int *err
)
651 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
652 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
655 return f
->filter
.flush (&next_ops
, &nxdata
, handle
, flags
, err
);
657 return backend_flush (b
->next
, conn
, flags
, err
);
661 filter_trim (struct backend
*b
, struct connection
*conn
, void *handle
,
662 uint32_t count
, uint64_t offset
,
663 uint32_t flags
, int *err
)
665 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
666 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
669 return f
->filter
.trim (&next_ops
, &nxdata
, handle
, count
, offset
, flags
,
672 return backend_trim (b
->next
, conn
, count
, offset
, flags
, err
);
676 filter_zero (struct backend
*b
, struct connection
*conn
, void *handle
,
677 uint32_t count
, uint64_t offset
, uint32_t flags
, int *err
)
679 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
680 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
683 return f
->filter
.zero (&next_ops
, &nxdata
, handle
,
684 count
, offset
, flags
, err
);
686 return backend_zero (b
->next
, conn
, count
, offset
, flags
, err
);
690 filter_extents (struct backend
*b
, struct connection
*conn
, void *handle
,
691 uint32_t count
, uint64_t offset
, uint32_t flags
,
692 struct nbdkit_extents
*extents
, int *err
)
694 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
695 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
697 if (f
->filter
.extents
)
698 return f
->filter
.extents (&next_ops
, &nxdata
, handle
,
699 count
, offset
, flags
,
702 return backend_extents (b
->next
, conn
, count
, offset
, flags
,
707 filter_cache (struct backend
*b
, struct connection
*conn
, void *handle
,
708 uint32_t count
, uint64_t offset
,
709 uint32_t flags
, int *err
)
711 struct backend_filter
*f
= container_of (b
, struct backend_filter
, backend
);
712 struct b_conn nxdata
= { .b
= b
->next
, .conn
= conn
};
716 return f
->filter
.cache (&next_ops
, &nxdata
, handle
,
717 count
, offset
, flags
, err
);
719 return backend_cache (b
->next
, conn
, count
, offset
, flags
, err
);
722 static struct backend filter_functions
= {
724 .thread_model
= filter_thread_model
,
725 .plugin_name
= plugin_name
,
726 .usage
= filter_usage
,
727 .version
= filter_version
,
728 .dump_fields
= filter_dump_fields
,
729 .config
= filter_config
,
730 .config_complete
= filter_config_complete
,
731 .magic_config_key
= plugin_magic_config_key
,
732 .preconnect
= filter_preconnect
,
734 .prepare
= filter_prepare
,
735 .finalize
= filter_finalize
,
736 .close
= filter_close
,
737 .get_size
= filter_get_size
,
738 .can_write
= filter_can_write
,
739 .can_flush
= filter_can_flush
,
740 .is_rotational
= filter_is_rotational
,
741 .can_trim
= filter_can_trim
,
742 .can_zero
= filter_can_zero
,
743 .can_fast_zero
= filter_can_fast_zero
,
744 .can_extents
= filter_can_extents
,
745 .can_fua
= filter_can_fua
,
746 .can_multi_conn
= filter_can_multi_conn
,
747 .can_cache
= filter_can_cache
,
748 .init_sparse
= filter_init_sparse
,
749 .init_zero
= filter_init_zero
,
750 .pread
= filter_pread
,
751 .pwrite
= filter_pwrite
,
752 .flush
= filter_flush
,
755 .extents
= filter_extents
,
756 .cache
= filter_cache
,
759 /* Register and load a filter. */
761 filter_register (struct backend
*next
, size_t index
, const char *filename
,
762 void *dl
, struct nbdkit_filter
*(*filter_init
) (void))
764 struct backend_filter
*f
;
765 const struct nbdkit_filter
*filter
;
767 f
= calloc (1, sizeof *f
);
773 f
->backend
= filter_functions
;
774 backend_init (&f
->backend
, next
, index
, filename
, dl
, "filter");
776 /* Call the initialization function which returns the address of the
777 * filter's own 'struct nbdkit_filter'.
779 filter
= filter_init ();
781 fprintf (stderr
, "%s: %s: filter registration function failed\n",
782 program_name
, filename
);
786 /* We do not provide API or ABI guarantees for filters, other than
787 * the ABI position and API contents of _api_version and _version to
788 * diagnose mismatch from the current nbdkit version.
790 if (filter
->_api_version
!= NBDKIT_FILTER_API_VERSION
) {
792 "%s: %s: filter is incompatible with this version of nbdkit "
793 "(_api_version = %d, need %d)\n",
794 program_name
, filename
, filter
->_api_version
,
795 NBDKIT_FILTER_API_VERSION
);
798 if (filter
->_version
== NULL
||
799 strcmp (filter
->_version
, PACKAGE_VERSION
) != 0) {
801 "%s: %s: filter is incompatible with this version of nbdkit "
802 "(_version = %s, need %s)\n",
803 program_name
, filename
, filter
->_version
?: "<null>",
810 backend_load (&f
->backend
, f
->filter
.name
, f
->filter
.load
);
812 return (struct backend
*) f
;