/*
 * Block protocol for I/O error injection
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "qemu/config-file.h"
#include "block/block_int.h"
#include "qemu/module.h"

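/*
 * Per-device state: rules are kept in one list per event. Inject-error rules
 * armed by a fired event sit on active_rules until consumed by a read or
 * write request, and requests suspended by a breakpoint rule are tracked in
 * suspended_reqs.
 */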
typedef struct BDRVBlkdebugState {
    int state;
    int new_state;

    QLIST_HEAD(, BlkdebugRule) rules[BLKDBG_EVENT_MAX];
    QSIMPLEQ_HEAD(, BlkdebugRule) active_rules;
    QLIST_HEAD(, BlkdebugSuspendedReq) suspended_reqs;
} BDRVBlkdebugState;

typedef struct BlkdebugAIOCB {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
} BlkdebugAIOCB;

typedef struct BlkdebugSuspendedReq {
    Coroutine *co;
    char *tag;
    QLIST_ENTRY(BlkdebugSuspendedReq) next;
} BlkdebugSuspendedReq;

static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb);

static const AIOCBInfo blkdebug_aiocb_info = {
    .aiocb_size = sizeof(BlkdebugAIOCB),
    .cancel     = blkdebug_aio_cancel,
};

/* Possible rule actions */
enum {
    ACTION_INJECT_ERROR,
    ACTION_SET_STATE,
    ACTION_SUSPEND,
};

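/*
 * A rule binds one action (inject an error, change the state machine's state,
 * or suspend the request) to a block-driver event, optionally restricted to a
 * particular current state.
 */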
typedef struct BlkdebugRule {
    BlkDebugEvent event;
    int action;
    int state;
    union {
        struct {
            int error;
            int immediately;
            int once;
            int64_t sector;
        } inject;
        struct {
            int new_state;
        } set_state;
        struct {
            char *tag;
        } suspend;
    } options;
    QLIST_ENTRY(BlkdebugRule) next;
    QSIMPLEQ_ENTRY(BlkdebugRule) active_next;
} BlkdebugRule;

static QemuOptsList inject_error_opts = {
    .name = "inject-error",
    .head = QTAILQ_HEAD_INITIALIZER(inject_error_opts.head),
    .desc = {
        {
            .name = "event",
            .type = QEMU_OPT_STRING,
        },
        {
            .name = "state",
            .type = QEMU_OPT_NUMBER,
        },
        {
            .name = "errno",
            .type = QEMU_OPT_NUMBER,
        },
        {
            .name = "sector",
            .type = QEMU_OPT_NUMBER,
        },
        {
            .name = "once",
            .type = QEMU_OPT_BOOL,
        },
        {
            .name = "immediately",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};

static QemuOptsList set_state_opts = {
    .name = "set-state",
    .head = QTAILQ_HEAD_INITIALIZER(set_state_opts.head),
    .desc = {
        {
            .name = "event",
            .type = QEMU_OPT_STRING,
        },
        {
            .name = "state",
            .type = QEMU_OPT_NUMBER,
        },
        {
            .name = "new_state",
            .type = QEMU_OPT_NUMBER,
        },
        { /* end of list */ }
    },
};

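/*
 * Illustrative configuration file for the two option groups above, assuming
 * the standard [group] / key = "value" syntax understood by
 * qemu_config_parse():
 *
 *   [inject-error]
 *   event = "read_aio"
 *   errno = "5"
 *   sector = "1024"
 *
 *   [set-state]
 *   event = "write_aio"
 *   state = "1"
 *   new_state = "2"
 */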
static QemuOptsList *config_groups[] = {
    &inject_error_opts,
    &set_state_opts,
    NULL
};

static const char *event_names[BLKDBG_EVENT_MAX] = {
    [BLKDBG_L1_UPDATE]                   = "l1_update",
    [BLKDBG_L1_GROW_ALLOC_TABLE]         = "l1_grow.alloc_table",
    [BLKDBG_L1_GROW_WRITE_TABLE]         = "l1_grow.write_table",
    [BLKDBG_L1_GROW_ACTIVATE_TABLE]      = "l1_grow.activate_table",

    [BLKDBG_L2_LOAD]                     = "l2_load",
    [BLKDBG_L2_UPDATE]                   = "l2_update",
    [BLKDBG_L2_UPDATE_COMPRESSED]        = "l2_update_compressed",
    [BLKDBG_L2_ALLOC_COW_READ]           = "l2_alloc.cow_read",
    [BLKDBG_L2_ALLOC_WRITE]              = "l2_alloc.write",

    [BLKDBG_READ_AIO]                    = "read_aio",
    [BLKDBG_READ_BACKING_AIO]            = "read_backing_aio",
    [BLKDBG_READ_COMPRESSED]             = "read_compressed",

    [BLKDBG_WRITE_AIO]                   = "write_aio",
    [BLKDBG_WRITE_COMPRESSED]            = "write_compressed",

    [BLKDBG_VMSTATE_LOAD]                = "vmstate_load",
    [BLKDBG_VMSTATE_SAVE]                = "vmstate_save",

    [BLKDBG_COW_READ]                    = "cow_read",
    [BLKDBG_COW_WRITE]                   = "cow_write",

    [BLKDBG_REFTABLE_LOAD]               = "reftable_load",
    [BLKDBG_REFTABLE_GROW]               = "reftable_grow",

    [BLKDBG_REFBLOCK_LOAD]               = "refblock_load",
    [BLKDBG_REFBLOCK_UPDATE]             = "refblock_update",
    [BLKDBG_REFBLOCK_UPDATE_PART]        = "refblock_update_part",
    [BLKDBG_REFBLOCK_ALLOC]              = "refblock_alloc",
    [BLKDBG_REFBLOCK_ALLOC_HOOKUP]       = "refblock_alloc.hookup",
    [BLKDBG_REFBLOCK_ALLOC_WRITE]        = "refblock_alloc.write",
    [BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS] = "refblock_alloc.write_blocks",
    [BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE]  = "refblock_alloc.write_table",
    [BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE] = "refblock_alloc.switch_table",

    [BLKDBG_CLUSTER_ALLOC]               = "cluster_alloc",
    [BLKDBG_CLUSTER_ALLOC_BYTES]         = "cluster_alloc_bytes",
    [BLKDBG_CLUSTER_FREE]                = "cluster_free",
};

static int get_event_by_name(const char *name, BlkDebugEvent *event)
{
    int i;

    for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
        if (!strcmp(event_names[i], name)) {
            *event = i;
            return 0;
        }
    }

    return -1;
}

struct add_rule_data {
    BDRVBlkdebugState *s;
    int action;
};

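/*
 * qemu_opts_foreach() callback: builds a BlkdebugRule from one option group
 * of the config file and inserts it into the list for its event.
 */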
static int add_rule(QemuOpts *opts, void *opaque)
{
    struct add_rule_data *d = opaque;
    BDRVBlkdebugState *s = d->s;
    const char *event_name;
    BlkDebugEvent event;
    struct BlkdebugRule *rule;

    /* Find the right event for the rule */
    event_name = qemu_opt_get(opts, "event");
    if (!event_name || get_event_by_name(event_name, &event) < 0) {
        return -1;
    }

    /* Set attributes common for all actions */
    rule = g_malloc0(sizeof(*rule));
    *rule = (struct BlkdebugRule) {
        .event  = event,
        .action = d->action,
        .state  = qemu_opt_get_number(opts, "state", 0),
    };

    /* Parse action-specific options */
    switch (d->action) {
    case ACTION_INJECT_ERROR:
        rule->options.inject.error = qemu_opt_get_number(opts, "errno", EIO);
        rule->options.inject.once  = qemu_opt_get_bool(opts, "once", 0);
        rule->options.inject.immediately =
            qemu_opt_get_bool(opts, "immediately", 0);
        rule->options.inject.sector = qemu_opt_get_number(opts, "sector", -1);
        break;

    case ACTION_SET_STATE:
        rule->options.set_state.new_state =
            qemu_opt_get_number(opts, "new_state", 0);
        break;

    case ACTION_SUSPEND:
        rule->options.suspend.tag =
            g_strdup(qemu_opt_get(opts, "tag"));
        break;
    }

    /* Add the rule to the per-event list */
    QLIST_INSERT_HEAD(&s->rules[event], rule, next);

    return 0;
}

static void remove_rule(BlkdebugRule *rule)
{
    switch (rule->action) {
    case ACTION_INJECT_ERROR:
    case ACTION_SET_STATE:
        break;
    case ACTION_SUSPEND:
        g_free(rule->options.suspend.tag);
        break;
    }

    QLIST_REMOVE(rule, next);
    g_free(rule);
}

static int read_config(BDRVBlkdebugState *s, const char *filename)
{
    FILE *f;
    int ret;
    struct add_rule_data d;

    f = fopen(filename, "r");
    if (f == NULL) {
        return -errno;
    }

    ret = qemu_config_parse(f, config_groups, filename);
    if (ret < 0) {
        goto fail;
    }

    d.s = s;
    d.action = ACTION_INJECT_ERROR;
    qemu_opts_foreach(&inject_error_opts, add_rule, &d, 0);

    d.action = ACTION_SET_STATE;
    qemu_opts_foreach(&set_state_opts, add_rule, &d, 0);

    ret = 0;
fail:
    qemu_opts_reset(&inject_error_opts);
    qemu_opts_reset(&set_state_opts);
    fclose(f);
    return ret;
}

/* Valid blkdebug filenames look like blkdebug:path/to/config:path/to/image */
static void blkdebug_parse_filename(const char *filename, QDict *options,
                                    Error **errp)
{
    const char *c;

    /* Parse the blkdebug: prefix */
    if (!strstart(filename, "blkdebug:", &filename)) {
        error_setg(errp, "File name string must start with 'blkdebug:'");
        return;
    }

    /* Parse config file path */
    c = strchr(filename, ':');
    if (c == NULL) {
        error_setg(errp, "blkdebug requires both config file and image path");
        return;
    }

    if (c != filename) {
        QString *config_path;
        config_path = qstring_from_substr(filename, 0, c - filename - 1);
        qdict_put(options, "config", config_path);
    }

    /* TODO Allow multi-level nesting and set file.filename here */
    filename = c + 1;
    qdict_put(options, "x-image", qstring_from_str(filename));
}

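/*
 * For example, a command line such as
 *   -drive file=blkdebug:/path/to/blkdebug.conf:/path/to/image.qcow2
 * is split here into the "config" and "x-image" runtime options below.
 */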
static QemuOptsList runtime_opts = {
    .name = "blkdebug",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "config",
            .type = QEMU_OPT_STRING,
            .help = "Path to the configuration file",
        },
        {
            .name = "x-image",
            .type = QEMU_OPT_STRING,
            .help = "[internal use only, will be removed]",
        },
        { /* end of list */ }
    },
};

static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags)
{
    BDRVBlkdebugState *s = bs->opaque;
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename, *config;
    int ret;

    opts = qemu_opts_create_nofail(&runtime_opts);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (error_is_set(&local_err)) {
        qerror_report_err(local_err);
        error_free(local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* Read rules from config file */
    config = qemu_opt_get(opts, "config");
    if (config) {
        ret = read_config(s, config);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Set initial state */
    s->state = 1;

    /* Open the backing file */
    filename = qemu_opt_get(opts, "x-image");
    if (filename == NULL) {
        ret = -EINVAL;
        goto fail;
    }

    ret = bdrv_file_open(&bs->file, filename, NULL, flags);
    if (ret < 0) {
        goto fail;
    }

    ret = 0;
fail:
    qemu_opts_del(opts);
    return ret;
}

static void error_callback_bh(void *opaque)
{
    struct BlkdebugAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_release(acb);
}

static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb)
{
    BlkdebugAIOCB *acb = container_of(blockacb, BlkdebugAIOCB, common);
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *inject_error(BlockDriverState *bs,
    BlockDriverCompletionFunc *cb, void *opaque, BlkdebugRule *rule)
{
    BDRVBlkdebugState *s = bs->opaque;
    int error = rule->options.inject.error;
    struct BlkdebugAIOCB *acb;
    QEMUBH *bh;

    if (rule->options.inject.once) {
        QSIMPLEQ_INIT(&s->active_rules);
    }

    if (rule->options.inject.immediately) {
        return NULL;
    }

    acb = qemu_aio_get(&blkdebug_aiocb_info, bs, cb, opaque);
    acb->ret = -error;

    bh = qemu_bh_new(error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

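/*
 * The read/write wrappers below check whether an active inject-error rule
 * covers the request's sector range; if so, the error is injected instead of
 * forwarding the request to bs->file.
 */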
static BlockDriverAIOCB *blkdebug_aio_readv(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugRule *rule = NULL;

    QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
        if (rule->options.inject.sector == -1 ||
            (rule->options.inject.sector >= sector_num &&
             rule->options.inject.sector < sector_num + nb_sectors)) {
            break;
        }
    }

    if (rule && rule->options.inject.error) {
        return inject_error(bs, cb, opaque, rule);
    }

    return bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
}

static BlockDriverAIOCB *blkdebug_aio_writev(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugRule *rule = NULL;

    QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
        if (rule->options.inject.sector == -1 ||
            (rule->options.inject.sector >= sector_num &&
             rule->options.inject.sector < sector_num + nb_sectors)) {
            break;
        }
    }

    if (rule && rule->options.inject.error) {
        return inject_error(bs, cb, opaque, rule);
    }

    return bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
}

static void blkdebug_close(BlockDriverState *bs)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugRule *rule, *next;
    int i;

    for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
        QLIST_FOREACH_SAFE(rule, &s->rules[i], next, next) {
            remove_rule(rule);
        }
    }
}

static void suspend_request(BlockDriverState *bs, BlkdebugRule *rule)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugSuspendedReq r;

    r = (BlkdebugSuspendedReq) {
        .co  = qemu_coroutine_self(),
        .tag = g_strdup(rule->options.suspend.tag),
    };

    remove_rule(rule);
    QLIST_INSERT_HEAD(&s->suspended_reqs, &r, next);

    printf("blkdebug: Suspended request '%s'\n", r.tag);
    qemu_coroutine_yield();
    printf("blkdebug: Resuming request '%s'\n", r.tag);

    QLIST_REMOVE(&r, next);
    g_free(r.tag);
}

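/*
 * Apply a single rule for the event that just fired: arm error injection,
 * record a pending state change, or suspend the current request. Returns
 * whether an error injection has been armed during this event.
 */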
static bool process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
    bool injected)
{
    BDRVBlkdebugState *s = bs->opaque;

    /* Only process rules for the current state */
    if (rule->state && rule->state != s->state) {
        return injected;
    }

    /* Take the action */
    switch (rule->action) {
    case ACTION_INJECT_ERROR:
        if (!injected) {
            QSIMPLEQ_INIT(&s->active_rules);
            injected = true;
        }
        QSIMPLEQ_INSERT_HEAD(&s->active_rules, rule, active_next);
        break;

    case ACTION_SET_STATE:
        s->new_state = rule->options.set_state.new_state;
        break;

    case ACTION_SUSPEND:
        suspend_request(bs, rule);
        break;
    }

    return injected;
}

static void blkdebug_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    BDRVBlkdebugState *s = bs->opaque;
    struct BlkdebugRule *rule, *next;
    bool injected;

    assert((int)event >= 0 && event < BLKDBG_EVENT_MAX);

    injected = false;
    s->new_state = s->state;
    QLIST_FOREACH_SAFE(rule, &s->rules[event], next, next) {
        injected = process_rule(bs, rule, injected);
    }
    s->state = s->new_state;
}

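/*
 * The debug breakpoint interface: a breakpoint installs a suspend rule for
 * the named event, resume wakes up a request suspended under the given tag,
 * and is_suspended reports whether such a request is currently parked.
 */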
static int blkdebug_debug_breakpoint(BlockDriverState *bs, const char *event,
                                     const char *tag)
{
    BDRVBlkdebugState *s = bs->opaque;
    struct BlkdebugRule *rule;
    BlkDebugEvent blkdebug_event;

    if (get_event_by_name(event, &blkdebug_event) < 0) {
        return -ENOENT;
    }

    rule = g_malloc(sizeof(*rule));
    *rule = (struct BlkdebugRule) {
        .event  = blkdebug_event,
        .action = ACTION_SUSPEND,
        .state  = 0,
        .options.suspend.tag = g_strdup(tag),
    };

    QLIST_INSERT_HEAD(&s->rules[blkdebug_event], rule, next);

    return 0;
}

static int blkdebug_debug_resume(BlockDriverState *bs, const char *tag)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugSuspendedReq *r;

    QLIST_FOREACH(r, &s->suspended_reqs, next) {
        if (!strcmp(r->tag, tag)) {
            qemu_coroutine_enter(r->co, NULL);
            return 0;
        }
    }
    return -ENOENT;
}

static bool blkdebug_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugSuspendedReq *r;

    QLIST_FOREACH(r, &s->suspended_reqs, next) {
        if (!strcmp(r->tag, tag)) {
            return true;
        }
    }
    return false;
}

static int64_t blkdebug_getlength(BlockDriverState *bs)
{
    return bdrv_getlength(bs->file);
}

static BlockDriver bdrv_blkdebug = {
    .format_name            = "blkdebug",
    .protocol_name          = "blkdebug",
    .instance_size          = sizeof(BDRVBlkdebugState),

    .bdrv_parse_filename    = blkdebug_parse_filename,
    .bdrv_file_open         = blkdebug_open,
    .bdrv_close             = blkdebug_close,
    .bdrv_getlength         = blkdebug_getlength,

    .bdrv_aio_readv         = blkdebug_aio_readv,
    .bdrv_aio_writev        = blkdebug_aio_writev,

    .bdrv_debug_event       = blkdebug_debug_event,
    .bdrv_debug_breakpoint  = blkdebug_debug_breakpoint,
    .bdrv_debug_resume      = blkdebug_debug_resume,
    .bdrv_debug_is_suspended = blkdebug_debug_is_suspended,
};

static void bdrv_blkdebug_init(void)
{
    bdrv_register(&bdrv_blkdebug);
}

block_init(bdrv_blkdebug_init);