[qemu.git] block/blkdebug.c
/*
 * Block protocol for I/O error injection
 *
 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block_int.h"
#include "module.h"

#include <stdbool.h>

typedef struct BlkdebugVars {
    int state;

    /* If inject_errno != 0, an error is injected for requests */
    int inject_errno;

    /* Decides if all future requests fail (false) or only the next one and
     * after the next request inject_errno is reset to 0 (true) */
    bool inject_once;

    /* Decides if aio_readv/writev fails right away (true) or returns an error
     * return value only in the callback (false) */
    bool inject_immediately;
} BlkdebugVars;

typedef struct BDRVBlkdebugState {
    BlkdebugVars vars;
    QLIST_HEAD(list, BlkdebugRule) rules[BLKDBG_EVENT_MAX];
} BDRVBlkdebugState;

typedef struct BlkdebugAIOCB {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
} BlkdebugAIOCB;

static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb);

static AIOPool blkdebug_aio_pool = {
    .aiocb_size = sizeof(BlkdebugAIOCB),
    .cancel     = blkdebug_aio_cancel,
};

enum {
    ACTION_INJECT_ERROR,
    ACTION_SET_STATE,
};

typedef struct BlkdebugRule {
    BlkDebugEvent event;
    int action;
    int state;
    union {
        struct {
            int error;
            int immediately;
            int once;
        } inject;
        struct {
            int new_state;
        } set_state;
    } options;
    QLIST_ENTRY(BlkdebugRule) next;
} BlkdebugRule;

static QemuOptsList inject_error_opts = {
    .name = "inject-error",
    .head = QTAILQ_HEAD_INITIALIZER(inject_error_opts.head),
    .desc = {
        {
            .name = "event",
            .type = QEMU_OPT_STRING,
        },
        {
            .name = "state",
            .type = QEMU_OPT_NUMBER,
        },
        {
            .name = "errno",
            .type = QEMU_OPT_NUMBER,
        },
        {
            .name = "once",
            .type = QEMU_OPT_BOOL,
        },
        {
            .name = "immediately",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};

static QemuOptsList set_state_opts = {
    .name = "set-state",
    /* Initialize with this list's own head, not inject_error_opts.head */
    .head = QTAILQ_HEAD_INITIALIZER(set_state_opts.head),
    .desc = {
        {
            .name = "event",
            .type = QEMU_OPT_STRING,
        },
        {
            .name = "state",
            .type = QEMU_OPT_NUMBER,
        },
        {
            .name = "new_state",
            .type = QEMU_OPT_NUMBER,
        },
        { /* end of list */ }
    },
};

static QemuOptsList *config_groups[] = {
    &inject_error_opts,
    &set_state_opts,
    NULL
};

static const char *event_names[BLKDBG_EVENT_MAX] = {
    [BLKDBG_L1_UPDATE]                      = "l1_update",
    [BLKDBG_L1_GROW_ALLOC_TABLE]            = "l1_grow.alloc_table",
    [BLKDBG_L1_GROW_WRITE_TABLE]            = "l1_grow.write_table",
    [BLKDBG_L1_GROW_ACTIVATE_TABLE]         = "l1_grow.activate_table",

    [BLKDBG_L2_LOAD]                        = "l2_load",
    [BLKDBG_L2_UPDATE]                      = "l2_update",
    [BLKDBG_L2_UPDATE_COMPRESSED]           = "l2_update_compressed",
    [BLKDBG_L2_ALLOC_COW_READ]              = "l2_alloc.cow_read",
    [BLKDBG_L2_ALLOC_WRITE]                 = "l2_alloc.write",

    [BLKDBG_READ]                           = "read",
    [BLKDBG_READ_AIO]                       = "read_aio",
    [BLKDBG_READ_BACKING]                   = "read_backing",
    [BLKDBG_READ_BACKING_AIO]               = "read_backing_aio",
    [BLKDBG_READ_COMPRESSED]                = "read_compressed",

    [BLKDBG_WRITE_AIO]                      = "write_aio",
    [BLKDBG_WRITE_COMPRESSED]               = "write_compressed",

    [BLKDBG_VMSTATE_LOAD]                   = "vmstate_load",
    [BLKDBG_VMSTATE_SAVE]                   = "vmstate_save",

    [BLKDBG_COW_READ]                       = "cow_read",
    [BLKDBG_COW_WRITE]                      = "cow_write",

    [BLKDBG_REFTABLE_LOAD]                  = "reftable_load",
    [BLKDBG_REFTABLE_GROW]                  = "reftable_grow",

    [BLKDBG_REFBLOCK_LOAD]                  = "refblock_load",
    [BLKDBG_REFBLOCK_UPDATE]                = "refblock_update",
    [BLKDBG_REFBLOCK_UPDATE_PART]           = "refblock_update_part",
    [BLKDBG_REFBLOCK_ALLOC]                 = "refblock_alloc",
    [BLKDBG_REFBLOCK_ALLOC_HOOKUP]          = "refblock_alloc.hookup",
    [BLKDBG_REFBLOCK_ALLOC_WRITE]           = "refblock_alloc.write",
    [BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS]    = "refblock_alloc.write_blocks",
    [BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE]     = "refblock_alloc.write_table",
    [BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE]    = "refblock_alloc.switch_table",

    [BLKDBG_CLUSTER_ALLOC]                  = "cluster_alloc",
    [BLKDBG_CLUSTER_ALLOC_BYTES]            = "cluster_alloc_bytes",
    [BLKDBG_CLUSTER_FREE]                   = "cluster_free",
};

static int get_event_by_name(const char *name, BlkDebugEvent *event)
{
    int i;

    for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
        if (!strcmp(event_names[i], name)) {
            *event = i;
            return 0;
        }
    }

    return -1;
}

struct add_rule_data {
    BDRVBlkdebugState *s;
    int action;
};

static int add_rule(QemuOpts *opts, void *opaque)
{
    struct add_rule_data *d = opaque;
    BDRVBlkdebugState *s = d->s;
    const char *event_name;
    BlkDebugEvent event;
    struct BlkdebugRule *rule;

    /* Find the right event for the rule */
    event_name = qemu_opt_get(opts, "event");
    if (!event_name || get_event_by_name(event_name, &event) < 0) {
        return -1;
    }

    /* Set attributes common for all actions */
    rule = qemu_mallocz(sizeof(*rule));
    *rule = (struct BlkdebugRule) {
        .event  = event,
        .action = d->action,
        .state  = qemu_opt_get_number(opts, "state", 0),
    };

    /* Parse action-specific options */
    switch (d->action) {
    case ACTION_INJECT_ERROR:
        rule->options.inject.error = qemu_opt_get_number(opts, "errno", EIO);
        rule->options.inject.once  = qemu_opt_get_bool(opts, "once", 0);
        rule->options.inject.immediately =
            qemu_opt_get_bool(opts, "immediately", 0);
        break;

    case ACTION_SET_STATE:
        rule->options.set_state.new_state =
            qemu_opt_get_number(opts, "new_state", 0);
        break;
    }

    /* Add the rule */
    QLIST_INSERT_HEAD(&s->rules[event], rule, next);

    return 0;
}

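/*
 * The configuration file read by read_config() below is parsed with
 * qemu_config_parse(), i.e. it consists of ini-style groups named after the
 * QemuOptsList definitions above. A purely illustrative example:
 *
 *   [inject-error]
 *   event = "read_aio"
 *   errno = "28"
 *   once = "on"
 *
 *   [set-state]
 *   event = "l1_update"
 *   new_state = "2"
 */
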
static int read_config(BDRVBlkdebugState *s, const char *filename)
{
    FILE *f;
    int ret;
    struct add_rule_data d;

    f = fopen(filename, "r");
    if (f == NULL) {
        return -errno;
    }

    ret = qemu_config_parse(f, config_groups, filename);
    if (ret < 0) {
        goto fail;
    }

    d.s = s;
    d.action = ACTION_INJECT_ERROR;
    qemu_opts_foreach(&inject_error_opts, add_rule, &d, 0);

    d.action = ACTION_SET_STATE;
    qemu_opts_foreach(&set_state_opts, add_rule, &d, 0);

    ret = 0;
fail:
    fclose(f);
    return ret;
}

/* Valid blkdebug filenames look like blkdebug:path/to/config:path/to/image */
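/* For example (paths purely illustrative):
 *   -drive file=blkdebug:/tmp/blkdebug.cfg:/tmp/test.qcow2
 */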
static int blkdebug_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVBlkdebugState *s = bs->opaque;
    int ret;
    char *config, *c;

    /* Parse the blkdebug: prefix */
    if (strncmp(filename, "blkdebug:", strlen("blkdebug:"))) {
        return -EINVAL;
    }
    filename += strlen("blkdebug:");

    /* Read rules from config file */
    c = strchr(filename, ':');
    if (c == NULL) {
        return -EINVAL;
    }

    config = strdup(filename);
    config[c - filename] = '\0';
    ret = read_config(s, config);
    free(config);
    if (ret < 0) {
        return ret;
    }
    filename = c + 1;

    /* Open the backing file */
    ret = bdrv_file_open(&bs->file, filename, flags);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static void error_callback_bh(void *opaque)
{
    struct BlkdebugAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_release(acb);
}

static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb)
{
    BlkdebugAIOCB *acb = (BlkdebugAIOCB*) blockacb;
    qemu_aio_release(acb);
}

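/*
 * inject_error() reports the configured error in one of two ways: with
 * "immediately" set it returns NULL, so the caller sees the failure
 * synchronously; otherwise it allocates an AIOCB and completes it with
 * -errno from a bottom half, as a real asynchronous request would.
 */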
static BlockDriverAIOCB *inject_error(BlockDriverState *bs,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVBlkdebugState *s = bs->opaque;
    int error = s->vars.inject_errno;
    struct BlkdebugAIOCB *acb;
    QEMUBH *bh;

    if (s->vars.inject_once) {
        s->vars.inject_errno = 0;
    }

    if (s->vars.inject_immediately) {
        return NULL;
    }

    acb = qemu_aio_get(&blkdebug_aio_pool, bs, cb, opaque);
    acb->ret = -error;

    bh = qemu_bh_new(error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return (BlockDriverAIOCB*) acb;
}

static BlockDriverAIOCB *blkdebug_aio_readv(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVBlkdebugState *s = bs->opaque;

    if (s->vars.inject_errno) {
        return inject_error(bs, cb, opaque);
    }

    BlockDriverAIOCB *acb =
        bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
    return acb;
}

static BlockDriverAIOCB *blkdebug_aio_writev(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVBlkdebugState *s = bs->opaque;

    if (s->vars.inject_errno) {
        return inject_error(bs, cb, opaque);
    }

    BlockDriverAIOCB *acb =
        bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
    return acb;
}

static void blkdebug_close(BlockDriverState *bs)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugRule *rule, *next;
    int i;

    for (i = 0; i < BLKDBG_EVENT_MAX; i++) {
        QLIST_FOREACH_SAFE(rule, &s->rules[i], next, next) {
            QLIST_REMOVE(rule, next);
            qemu_free(rule);
        }
    }
}

static void blkdebug_flush(BlockDriverState *bs)
{
    bdrv_flush(bs->file);
}

static BlockDriverAIOCB *blkdebug_aio_flush(BlockDriverState *bs,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_flush(bs->file, cb, opaque);
}

static void process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
    BlkdebugVars *old_vars)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugVars *vars = &s->vars;

    /* Only process rules for the current state */
    if (rule->state && rule->state != old_vars->state) {
        return;
    }

    /* Take the action */
    switch (rule->action) {
    case ACTION_INJECT_ERROR:
        vars->inject_errno       = rule->options.inject.error;
        vars->inject_once        = rule->options.inject.once;
        vars->inject_immediately = rule->options.inject.immediately;
        break;

    case ACTION_SET_STATE:
        vars->state = rule->options.set_state.new_state;
        break;
    }
}

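/*
 * Rules therefore form a small state machine: a rule with a nonzero "state"
 * only fires while the current state matches, and set-state rules switch the
 * current state. A hypothetical configuration that starts injecting EIO only
 * after the first l2_load event has been seen could look like:
 *
 *   [set-state]
 *   event = "l2_load"
 *   new_state = "2"
 *
 *   [inject-error]
 *   event = "l2_load"
 *   state = "2"
 *   errno = "5"
 */
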
static void blkdebug_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    BDRVBlkdebugState *s = bs->opaque;
    struct BlkdebugRule *rule;
    BlkdebugVars old_vars = s->vars;

    if (event < 0 || event >= BLKDBG_EVENT_MAX) {
        return;
    }

    QLIST_FOREACH(rule, &s->rules[event], next) {
        process_rule(bs, rule, &old_vars);
    }
}

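/*
 * Events are fired by format drivers (e.g. qcow2) on their underlying
 * bs->file, presumably through bdrv_debug_event() / the BLKDBG_EVENT macro,
 * which dispatches to this callback when the protocol is blkdebug.
 */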
static BlockDriver bdrv_blkdebug = {
    .format_name        = "blkdebug",
    .protocol_name      = "blkdebug",

    .instance_size      = sizeof(BDRVBlkdebugState),

    .bdrv_file_open     = blkdebug_open,
    .bdrv_close         = blkdebug_close,
    .bdrv_flush         = blkdebug_flush,

    .bdrv_aio_readv     = blkdebug_aio_readv,
    .bdrv_aio_writev    = blkdebug_aio_writev,
    .bdrv_aio_flush     = blkdebug_aio_flush,

    .bdrv_debug_event   = blkdebug_debug_event,
};

static void bdrv_blkdebug_init(void)
{
    bdrv_register(&bdrv_blkdebug);
}

block_init(bdrv_blkdebug_init);