/* block/blkdebug.c — QEMU block protocol driver for I/O error injection */
/*
 * Block protocol for I/O error injection
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "module.h"

#include <stdbool.h>
/* Mutable per-device error-injection state, driven by config-file rules. */
typedef struct BlkdebugVars {
    /* Current state number; rules may be restricted to a given state */
    int state;

    /* If inject_errno != 0, an error is injected for requests */
    int inject_errno;

    /* Decides if all future requests fail (false) or only the next one and
     * after the next request inject_errno is reset to 0 (true) */
    bool inject_once;

    /* Decides if aio_readv/writev fails right away (true) or returns an error
     * return value only in the callback (false) */
    bool inject_immediately;
} BlkdebugVars;
46 typedef struct BDRVBlkdebugState {
47 BlockDriverState *hd;
48 BlkdebugVars vars;
49 QLIST_HEAD(list, BlkdebugRule) rules[BLKDBG_EVENT_MAX];
50 } BDRVBlkdebugState;
52 typedef struct BlkdebugAIOCB {
53 BlockDriverAIOCB common;
54 QEMUBH *bh;
55 int ret;
56 } BlkdebugAIOCB;
58 static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb);
60 static AIOPool blkdebug_aio_pool = {
61 .aiocb_size = sizeof(BlkdebugAIOCB),
62 .cancel = blkdebug_aio_cancel,
/* Actions a rule can take when its event fires. */
enum {
    ACTION_INJECT_ERROR,   /* fail matching requests with a given errno */
    ACTION_SET_STATE,      /* switch the device to a new state number */
};
70 typedef struct BlkdebugRule {
71 BlkDebugEvent event;
72 int action;
73 int state;
74 union {
75 struct {
76 int error;
77 int immediately;
78 int once;
79 } inject;
80 struct {
81 int new_state;
82 } set_state;
83 } options;
84 QLIST_ENTRY(BlkdebugRule) next;
85 } BlkdebugRule;
87 static QemuOptsList inject_error_opts = {
88 .name = "inject-error",
89 .head = QTAILQ_HEAD_INITIALIZER(inject_error_opts.head),
90 .desc = {
92 .name = "event",
93 .type = QEMU_OPT_STRING,
96 .name = "state",
97 .type = QEMU_OPT_NUMBER,
100 .name = "errno",
101 .type = QEMU_OPT_NUMBER,
104 .name = "once",
105 .type = QEMU_OPT_BOOL,
108 .name = "immediately",
109 .type = QEMU_OPT_BOOL,
111 { /* end of list */ }
115 static QemuOptsList set_state_opts = {
116 .name = "set-state",
117 .head = QTAILQ_HEAD_INITIALIZER(inject_error_opts.head),
118 .desc = {
120 .name = "event",
121 .type = QEMU_OPT_STRING,
124 .name = "state",
125 .type = QEMU_OPT_NUMBER,
128 .name = "new_state",
129 .type = QEMU_OPT_NUMBER,
131 { /* end of list */ }
135 static QemuOptsList *config_groups[] = {
136 &inject_error_opts,
137 &set_state_opts,
138 NULL
141 static const char *event_names[BLKDBG_EVENT_MAX] = {
142 [BLKDBG_L1_UPDATE] = "l1_update",
143 [BLKDBG_L1_GROW_ALLOC_TABLE] = "l1_grow.alloc_table",
144 [BLKDBG_L1_GROW_WRITE_TABLE] = "l1_grow.write_table",
145 [BLKDBG_L1_GROW_ACTIVATE_TABLE] = "l1_grow.activate_table",
147 [BLKDBG_L2_LOAD] = "l2_load",
148 [BLKDBG_L2_UPDATE] = "l2_update",
149 [BLKDBG_L2_UPDATE_COMPRESSED] = "l2_update_compressed",
150 [BLKDBG_L2_ALLOC_COW_READ] = "l2_alloc.cow_read",
151 [BLKDBG_L2_ALLOC_WRITE] = "l2_alloc.write",
153 [BLKDBG_READ] = "read",
154 [BLKDBG_READ_AIO] = "read_aio",
155 [BLKDBG_READ_BACKING] = "read_backing",
156 [BLKDBG_READ_BACKING_AIO] = "read_backing_aio",
157 [BLKDBG_READ_COMPRESSED] = "read_compressed",
159 [BLKDBG_WRITE_AIO] = "write_aio",
160 [BLKDBG_WRITE_COMPRESSED] = "write_compressed",
162 [BLKDBG_VMSTATE_LOAD] = "vmstate_load",
163 [BLKDBG_VMSTATE_SAVE] = "vmstate_save",
165 [BLKDBG_COW_READ] = "cow_read",
166 [BLKDBG_COW_WRITE] = "cow_write",
168 [BLKDBG_REFTABLE_LOAD] = "reftable_load",
169 [BLKDBG_REFTABLE_GROW] = "reftable_grow",
171 [BLKDBG_REFBLOCK_LOAD] = "refblock_load",
172 [BLKDBG_REFBLOCK_UPDATE] = "refblock_update",
173 [BLKDBG_REFBLOCK_UPDATE_PART] = "refblock_update_part",
174 [BLKDBG_REFBLOCK_ALLOC] = "refblock_alloc",
175 [BLKDBG_REFBLOCK_ALLOC_HOOKUP] = "refblock_alloc.hookup",
176 [BLKDBG_REFBLOCK_ALLOC_WRITE] = "refblock_alloc.write",
177 [BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS] = "refblock_alloc.write_blocks",
178 [BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE] = "refblock_alloc.write_table",
179 [BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE] = "refblock_alloc.switch_table",
181 [BLKDBG_CLUSTER_ALLOC] = "cluster_alloc",
182 [BLKDBG_CLUSTER_ALLOC_BYTES] = "cluster_alloc_bytes",
183 [BLKDBG_CLUSTER_FREE] = "cluster_free",
186 static int get_event_by_name(const char *name, BlkDebugEvent *event)
188 int i;
190 for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
191 if (!strcmp(event_names[i], name)) {
192 *event = i;
193 return 0;
197 return -1;
200 struct add_rule_data {
201 BDRVBlkdebugState *s;
202 int action;
205 static int add_rule(QemuOpts *opts, void *opaque)
207 struct add_rule_data *d = opaque;
208 BDRVBlkdebugState *s = d->s;
209 const char* event_name;
210 BlkDebugEvent event;
211 struct BlkdebugRule *rule;
213 /* Find the right event for the rule */
214 event_name = qemu_opt_get(opts, "event");
215 if (!event_name || get_event_by_name(event_name, &event) < 0) {
216 return -1;
219 /* Set attributes common for all actions */
220 rule = qemu_mallocz(sizeof(*rule));
221 *rule = (struct BlkdebugRule) {
222 .event = event,
223 .action = d->action,
224 .state = qemu_opt_get_number(opts, "state", 0),
227 /* Parse action-specific options */
228 switch (d->action) {
229 case ACTION_INJECT_ERROR:
230 rule->options.inject.error = qemu_opt_get_number(opts, "errno", EIO);
231 rule->options.inject.once = qemu_opt_get_bool(opts, "once", 0);
232 rule->options.inject.immediately =
233 qemu_opt_get_bool(opts, "immediately", 0);
234 break;
236 case ACTION_SET_STATE:
237 rule->options.set_state.new_state =
238 qemu_opt_get_number(opts, "new_state", 0);
239 break;
242 /* Add the rule */
243 QLIST_INSERT_HEAD(&s->rules[event], rule, next);
245 return 0;
248 static int read_config(BDRVBlkdebugState *s, const char *filename)
250 FILE *f;
251 int ret;
252 struct add_rule_data d;
254 f = fopen(filename, "r");
255 if (f == NULL) {
256 return -errno;
259 ret = qemu_config_parse(f, config_groups, filename);
260 if (ret < 0) {
261 goto fail;
264 d.s = s;
265 d.action = ACTION_INJECT_ERROR;
266 qemu_opts_foreach(&inject_error_opts, add_rule, &d, 0);
268 d.action = ACTION_SET_STATE;
269 qemu_opts_foreach(&set_state_opts, add_rule, &d, 0);
271 ret = 0;
272 fail:
273 fclose(f);
274 return ret;
277 /* Valid blkdebug filenames look like blkdebug:path/to/config:path/to/image */
278 static int blkdebug_open(BlockDriverState *bs, const char *filename, int flags)
280 BDRVBlkdebugState *s = bs->opaque;
281 int ret;
282 char *config, *c;
284 /* Parse the blkdebug: prefix */
285 if (strncmp(filename, "blkdebug:", strlen("blkdebug:"))) {
286 return -EINVAL;
288 filename += strlen("blkdebug:");
290 /* Read rules from config file */
291 c = strchr(filename, ':');
292 if (c == NULL) {
293 return -EINVAL;
296 config = strdup(filename);
297 config[c - filename] = '\0';
298 ret = read_config(s, config);
299 free(config);
300 if (ret < 0) {
301 return ret;
303 filename = c + 1;
305 /* Open the backing file */
306 ret = bdrv_file_open(&s->hd, filename, flags);
307 if (ret < 0) {
308 return ret;
311 return 0;
314 static void error_callback_bh(void *opaque)
316 struct BlkdebugAIOCB *acb = opaque;
317 qemu_bh_delete(acb->bh);
318 acb->common.cb(acb->common.opaque, acb->ret);
319 qemu_aio_release(acb);
322 static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb)
324 BlkdebugAIOCB *acb = (BlkdebugAIOCB*) blockacb;
325 qemu_aio_release(acb);
328 static BlockDriverAIOCB *inject_error(BlockDriverState *bs,
329 BlockDriverCompletionFunc *cb, void *opaque)
331 BDRVBlkdebugState *s = bs->opaque;
332 int error = s->vars.inject_errno;
333 struct BlkdebugAIOCB *acb;
334 QEMUBH *bh;
336 if (s->vars.inject_once) {
337 s->vars.inject_errno = 0;
340 if (s->vars.inject_immediately) {
341 return NULL;
344 acb = qemu_aio_get(&blkdebug_aio_pool, bs, cb, opaque);
345 acb->ret = -error;
347 bh = qemu_bh_new(error_callback_bh, acb);
348 acb->bh = bh;
349 qemu_bh_schedule(bh);
351 return (BlockDriverAIOCB*) acb;
354 static BlockDriverAIOCB *blkdebug_aio_readv(BlockDriverState *bs,
355 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
356 BlockDriverCompletionFunc *cb, void *opaque)
358 BDRVBlkdebugState *s = bs->opaque;
360 if (s->vars.inject_errno) {
361 return inject_error(bs, cb, opaque);
364 BlockDriverAIOCB *acb =
365 bdrv_aio_readv(s->hd, sector_num, qiov, nb_sectors, cb, opaque);
366 return acb;
369 static BlockDriverAIOCB *blkdebug_aio_writev(BlockDriverState *bs,
370 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
371 BlockDriverCompletionFunc *cb, void *opaque)
373 BDRVBlkdebugState *s = bs->opaque;
375 if (s->vars.inject_errno) {
376 return inject_error(bs, cb, opaque);
379 BlockDriverAIOCB *acb =
380 bdrv_aio_writev(s->hd, sector_num, qiov, nb_sectors, cb, opaque);
381 return acb;
384 static void blkdebug_close(BlockDriverState *bs)
386 BDRVBlkdebugState *s = bs->opaque;
387 BlkdebugRule *rule, *next;
388 int i;
390 for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
391 QLIST_FOREACH_SAFE(rule, &s->rules[i], next, next) {
392 QLIST_REMOVE(rule, next);
393 qemu_free(rule);
397 bdrv_delete(s->hd);
400 static void blkdebug_flush(BlockDriverState *bs)
402 BDRVBlkdebugState *s = bs->opaque;
403 bdrv_flush(s->hd);
406 static BlockDriverAIOCB *blkdebug_aio_flush(BlockDriverState *bs,
407 BlockDriverCompletionFunc *cb, void *opaque)
409 BDRVBlkdebugState *s = bs->opaque;
410 return bdrv_aio_flush(s->hd, cb, opaque);
413 static void process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
414 BlkdebugVars *old_vars)
416 BDRVBlkdebugState *s = bs->opaque;
417 BlkdebugVars *vars = &s->vars;
419 /* Only process rules for the current state */
420 if (rule->state && rule->state != old_vars->state) {
421 return;
424 /* Take the action */
425 switch (rule->action) {
426 case ACTION_INJECT_ERROR:
427 vars->inject_errno = rule->options.inject.error;
428 vars->inject_once = rule->options.inject.once;
429 vars->inject_immediately = rule->options.inject.immediately;
430 break;
432 case ACTION_SET_STATE:
433 vars->state = rule->options.set_state.new_state;
434 break;
438 static void blkdebug_debug_event(BlockDriverState *bs, BlkDebugEvent event)
440 BDRVBlkdebugState *s = bs->opaque;
441 struct BlkdebugRule *rule;
442 BlkdebugVars old_vars = s->vars;
444 if (event < 0 || event >= BLKDBG_EVENT_MAX) {
445 return;
448 QLIST_FOREACH(rule, &s->rules[event], next) {
449 process_rule(bs, rule, &old_vars);
453 static BlockDriver bdrv_blkdebug = {
454 .format_name = "blkdebug",
455 .protocol_name = "blkdebug",
457 .instance_size = sizeof(BDRVBlkdebugState),
459 .bdrv_open = blkdebug_open,
460 .bdrv_close = blkdebug_close,
461 .bdrv_flush = blkdebug_flush,
463 .bdrv_aio_readv = blkdebug_aio_readv,
464 .bdrv_aio_writev = blkdebug_aio_writev,
465 .bdrv_aio_flush = blkdebug_aio_flush,
467 .bdrv_debug_event = blkdebug_debug_event,
470 static void bdrv_blkdebug_init(void)
472 bdrv_register(&bdrv_blkdebug);
475 block_init(bdrv_blkdebug_init);