block/blkdebug.c
/*
 * Block protocol for I/O error injection
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "module.h"
typedef struct BlkdebugVars {
    int state;

    /* If inject_errno != 0, an error is injected for requests */
    int inject_errno;

    /* Decides if all future requests fail (false) or only the next one and
     * after the next request inject_errno is reset to 0 (true) */
    bool inject_once;

    /* Decides if aio_readv/writev fails right away (true) or returns an error
     * return value only in the callback (false) */
    bool inject_immediately;
} BlkdebugVars;
typedef struct BDRVBlkdebugState {
    BlkdebugVars vars;
    QLIST_HEAD(list, BlkdebugRule) rules[BLKDBG_EVENT_MAX];
} BDRVBlkdebugState;

typedef struct BlkdebugAIOCB {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
} BlkdebugAIOCB;

static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb);

static AIOPool blkdebug_aio_pool = {
    .aiocb_size = sizeof(BlkdebugAIOCB),
    .cancel     = blkdebug_aio_cancel,
};
enum {
    ACTION_INJECT_ERROR,
    ACTION_SET_STATE,
};
typedef struct BlkdebugRule {
    BlkDebugEvent event;
    int action;
    int state;
    union {
        struct {
            int error;
            int immediately;
            int once;
        } inject;
        struct {
            int new_state;
        } set_state;
    } options;
    QLIST_ENTRY(BlkdebugRule) next;
} BlkdebugRule;
static QemuOptsList inject_error_opts = {
    .name = "inject-error",
    .head = QTAILQ_HEAD_INITIALIZER(inject_error_opts.head),
    .desc = {
        {
            .name = "event",
            .type = QEMU_OPT_STRING,
        },
        {
            .name = "state",
            .type = QEMU_OPT_NUMBER,
        },
        {
            .name = "errno",
            .type = QEMU_OPT_NUMBER,
        },
        {
            .name = "once",
            .type = QEMU_OPT_BOOL,
        },
        {
            .name = "immediately",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};
static QemuOptsList set_state_opts = {
    .name = "set-state",
    .head = QTAILQ_HEAD_INITIALIZER(set_state_opts.head),
    .desc = {
        {
            .name = "event",
            .type = QEMU_OPT_STRING,
        },
        {
            .name = "state",
            .type = QEMU_OPT_NUMBER,
        },
        {
            .name = "new_state",
            .type = QEMU_OPT_NUMBER,
        },
        { /* end of list */ }
    },
};
static QemuOptsList *config_groups[] = {
    &inject_error_opts,
    &set_state_opts,
    NULL
};
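
/*
 * The option groups above are handed to qemu_config_parse(), so a blkdebug
 * rule file uses ini-style sections named after the QemuOptsList names.
 * A minimal sketch of such a file (event names, state numbers and errno
 * values here are only illustrative):
 *
 *   [inject-error]
 *   event = "read_aio"
 *   errno = "5"
 *   once = "on"
 *
 *   [set-state]
 *   event = "l2_load"
 *   state = "1"
 *   new_state = "2"
 */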
static const char *event_names[BLKDBG_EVENT_MAX] = {
    [BLKDBG_L1_UPDATE]                      = "l1_update",
    [BLKDBG_L1_GROW_ALLOC_TABLE]            = "l1_grow.alloc_table",
    [BLKDBG_L1_GROW_WRITE_TABLE]            = "l1_grow.write_table",
    [BLKDBG_L1_GROW_ACTIVATE_TABLE]         = "l1_grow.activate_table",

    [BLKDBG_L2_LOAD]                        = "l2_load",
    [BLKDBG_L2_UPDATE]                      = "l2_update",
    [BLKDBG_L2_UPDATE_COMPRESSED]           = "l2_update_compressed",
    [BLKDBG_L2_ALLOC_COW_READ]              = "l2_alloc.cow_read",
    [BLKDBG_L2_ALLOC_WRITE]                 = "l2_alloc.write",

    [BLKDBG_READ]                           = "read",
    [BLKDBG_READ_AIO]                       = "read_aio",
    [BLKDBG_READ_BACKING]                   = "read_backing",
    [BLKDBG_READ_BACKING_AIO]               = "read_backing_aio",
    [BLKDBG_READ_COMPRESSED]                = "read_compressed",

    [BLKDBG_WRITE_AIO]                      = "write_aio",
    [BLKDBG_WRITE_COMPRESSED]               = "write_compressed",

    [BLKDBG_VMSTATE_LOAD]                   = "vmstate_load",
    [BLKDBG_VMSTATE_SAVE]                   = "vmstate_save",

    [BLKDBG_COW_READ]                       = "cow_read",
    [BLKDBG_COW_WRITE]                      = "cow_write",

    [BLKDBG_REFTABLE_LOAD]                  = "reftable_load",
    [BLKDBG_REFTABLE_GROW]                  = "reftable_grow",

    [BLKDBG_REFBLOCK_LOAD]                  = "refblock_load",
    [BLKDBG_REFBLOCK_UPDATE]                = "refblock_update",
    [BLKDBG_REFBLOCK_UPDATE_PART]           = "refblock_update_part",
    [BLKDBG_REFBLOCK_ALLOC]                 = "refblock_alloc",
    [BLKDBG_REFBLOCK_ALLOC_HOOKUP]          = "refblock_alloc.hookup",
    [BLKDBG_REFBLOCK_ALLOC_WRITE]           = "refblock_alloc.write",
    [BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS]    = "refblock_alloc.write_blocks",
    [BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE]     = "refblock_alloc.write_table",
    [BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE]    = "refblock_alloc.switch_table",

    [BLKDBG_CLUSTER_ALLOC]                  = "cluster_alloc",
    [BLKDBG_CLUSTER_ALLOC_BYTES]            = "cluster_alloc_bytes",
    [BLKDBG_CLUSTER_FREE]                   = "cluster_free",
};
static int get_event_by_name(const char *name, BlkDebugEvent *event)
{
    int i;

    for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
        if (!strcmp(event_names[i], name)) {
            *event = i;
            return 0;
        }
    }

    return -1;
}
struct add_rule_data {
    BDRVBlkdebugState *s;
    int action;
};
static int add_rule(QemuOpts *opts, void *opaque)
{
    struct add_rule_data *d = opaque;
    BDRVBlkdebugState *s = d->s;
    const char* event_name;
    BlkDebugEvent event;
    struct BlkdebugRule *rule;

    /* Find the right event for the rule */
    event_name = qemu_opt_get(opts, "event");
    if (!event_name || get_event_by_name(event_name, &event) < 0) {
        return -1;
    }

    /* Set attributes common for all actions */
    rule = qemu_mallocz(sizeof(*rule));
    *rule = (struct BlkdebugRule) {
        .event  = event,
        .action = d->action,
        .state  = qemu_opt_get_number(opts, "state", 0),
    };

    /* Parse action-specific options */
    switch (d->action) {
    case ACTION_INJECT_ERROR:
        rule->options.inject.error = qemu_opt_get_number(opts, "errno", EIO);
        rule->options.inject.once  = qemu_opt_get_bool(opts, "once", 0);
        rule->options.inject.immediately =
            qemu_opt_get_bool(opts, "immediately", 0);
        break;

    case ACTION_SET_STATE:
        rule->options.set_state.new_state =
            qemu_opt_get_number(opts, "new_state", 0);
        break;
    }

    /* Add the rule */
    QLIST_INSERT_HEAD(&s->rules[event], rule, next);

    return 0;
}
static int read_config(BDRVBlkdebugState *s, const char *filename)
{
    FILE *f;
    int ret;
    struct add_rule_data d;

    f = fopen(filename, "r");
    if (f == NULL) {
        return -errno;
    }

    ret = qemu_config_parse(f, config_groups, filename);
    if (ret < 0) {
        goto fail;
    }

    d.s = s;
    d.action = ACTION_INJECT_ERROR;
    qemu_opts_foreach(&inject_error_opts, add_rule, &d, 0);

    d.action = ACTION_SET_STATE;
    qemu_opts_foreach(&set_state_opts, add_rule, &d, 0);

    ret = 0;
fail:
    qemu_opts_reset(&inject_error_opts);
    qemu_opts_reset(&set_state_opts);
    fclose(f);
    return ret;
}
/* Valid blkdebug filenames look like blkdebug:path/to/config:path/to/image */
static int blkdebug_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVBlkdebugState *s = bs->opaque;
    int ret;
    char *config, *c;

    /* Parse the blkdebug: prefix */
    if (strncmp(filename, "blkdebug:", strlen("blkdebug:"))) {
        return -EINVAL;
    }
    filename += strlen("blkdebug:");

    /* Read rules from config file */
    c = strchr(filename, ':');
    if (c == NULL) {
        return -EINVAL;
    }

    config = strdup(filename);
    config[c - filename] = '\0';
    ret = read_config(s, config);
    free(config);
    if (ret < 0) {
        return ret;
    }
    filename = c + 1;

    /* Set initial state */
    s->vars.state = 1;

    /* Open the backing file */
    ret = bdrv_file_open(&bs->file, filename, flags);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
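
/*
 * With a filename of the form described above, the protocol can be used on
 * the command line roughly like this (paths are hypothetical):
 *
 *   -drive file=blkdebug:/path/to/blkdebug.conf:/path/to/image.qcow2
 *
 * The part before the second colon is handed to read_config(); the rest is
 * opened as the actual image via bdrv_file_open().
 */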
static void error_callback_bh(void *opaque)
{
    struct BlkdebugAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_release(acb);
}

static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb)
{
    BlkdebugAIOCB *acb = container_of(blockacb, BlkdebugAIOCB, common);
    qemu_aio_release(acb);
}
static BlockDriverAIOCB *inject_error(BlockDriverState *bs,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVBlkdebugState *s = bs->opaque;
    int error = s->vars.inject_errno;
    struct BlkdebugAIOCB *acb;
    QEMUBH *bh;

    if (s->vars.inject_once) {
        s->vars.inject_errno = 0;
    }

    if (s->vars.inject_immediately) {
        return NULL;
    }

    acb = qemu_aio_get(&blkdebug_aio_pool, bs, cb, opaque);
    acb->ret = -error;

    bh = qemu_bh_new(error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}
static BlockDriverAIOCB *blkdebug_aio_readv(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVBlkdebugState *s = bs->opaque;

    if (s->vars.inject_errno) {
        return inject_error(bs, cb, opaque);
    }

    BlockDriverAIOCB *acb =
        bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
    return acb;
}

static BlockDriverAIOCB *blkdebug_aio_writev(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVBlkdebugState *s = bs->opaque;

    if (s->vars.inject_errno) {
        return inject_error(bs, cb, opaque);
    }

    BlockDriverAIOCB *acb =
        bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
    return acb;
}
static void blkdebug_close(BlockDriverState *bs)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugRule *rule, *next;
    int i;

    for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
        QLIST_FOREACH_SAFE(rule, &s->rules[i], next, next) {
            QLIST_REMOVE(rule, next);
            qemu_free(rule);
        }
    }
}
static int blkdebug_flush(BlockDriverState *bs)
{
    return bdrv_flush(bs->file);
}

static BlockDriverAIOCB *blkdebug_aio_flush(BlockDriverState *bs,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_flush(bs->file, cb, opaque);
}
static void process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
    BlkdebugVars *old_vars)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugVars *vars = &s->vars;

    /* Only process rules for the current state */
    if (rule->state && rule->state != old_vars->state) {
        return;
    }

    /* Take the action */
    switch (rule->action) {
    case ACTION_INJECT_ERROR:
        vars->inject_errno       = rule->options.inject.error;
        vars->inject_once        = rule->options.inject.once;
        vars->inject_immediately = rule->options.inject.immediately;
        break;

    case ACTION_SET_STATE:
        vars->state = rule->options.set_state.new_state;
        break;
    }
}
static void blkdebug_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    BDRVBlkdebugState *s = bs->opaque;
    struct BlkdebugRule *rule;
    BlkdebugVars old_vars = s->vars;

    assert((int)event >= 0 && event < BLKDBG_EVENT_MAX);

    QLIST_FOREACH(rule, &s->rules[event], next) {
        process_rule(bs, rule, &old_vars);
    }
}
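
/*
 * blkdebug_debug_event() is reached through bdrv_debug_event(); block format
 * drivers such as qcow2 signal events on their protocol layer with the
 * BLKDBG_EVENT() macro from block_int.h. A rough sketch of such a call
 * (the exact call sites live in the format drivers):
 *
 *   BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
 */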
static BlockDriver bdrv_blkdebug = {
    .format_name        = "blkdebug",
    .protocol_name      = "blkdebug",

    .instance_size      = sizeof(BDRVBlkdebugState),

    .bdrv_file_open     = blkdebug_open,
    .bdrv_close         = blkdebug_close,
    .bdrv_flush         = blkdebug_flush,

    .bdrv_aio_readv     = blkdebug_aio_readv,
    .bdrv_aio_writev    = blkdebug_aio_writev,
    .bdrv_aio_flush     = blkdebug_aio_flush,

    .bdrv_debug_event   = blkdebug_debug_event,
};
static void bdrv_blkdebug_init(void)
{
    bdrv_register(&bdrv_blkdebug);
}

block_init(bdrv_blkdebug_init);