/*
 * Block layer qmp and info dump related functions
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "block/qapi.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "block/write-threshold.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-block-core.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qmp/qstring.h"
#include "qemu/qemu-print.h"
#include "sysemu/block-backend.h"
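
/*
 * Return a newly allocated BlockDeviceInfo describing @bs, or NULL on
 * error (with @errp set).  If @blk is non-NULL, BlockBackend-level data
 * such as the writeback cache setting and I/O throttling configuration
 * is included as well.  When @flat is true only the top image is
 * described; otherwise the chain of filtered/backing images is reported
 * through info->image.backing_image.
 */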
BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
                                        BlockDriverState *bs,
                                        bool flat,
                                        Error **errp)
{
    ImageInfo **p_image_info;
    BlockDriverState *bs0, *backing;
    BlockDeviceInfo *info;

    if (!bs->drv) {
        error_setg(errp, "Block device %s is ejected", bs->node_name);
        return NULL;
    }

    bdrv_refresh_filename(bs);

    info = g_malloc0(sizeof(*info));
    info->file = g_strdup(bs->filename);
    info->ro = bs->read_only;
    info->drv = g_strdup(bs->drv->format_name);
    info->encrypted = bs->encrypted;
    info->encryption_key_missing = false;

    info->cache = g_new(BlockdevCacheInfo, 1);
    *info->cache = (BlockdevCacheInfo) {
        .writeback = blk ? blk_enable_write_cache(blk) : true,
        .direct = !!(bs->open_flags & BDRV_O_NOCACHE),
        .no_flush = !!(bs->open_flags & BDRV_O_NO_FLUSH),
    };

    if (bs->node_name[0]) {
        info->has_node_name = true;
        info->node_name = g_strdup(bs->node_name);
    }

    backing = bdrv_cow_bs(bs);
    if (backing) {
        info->has_backing_file = true;
        info->backing_file = g_strdup(backing->filename);
    }

    if (!QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    info->detect_zeroes = bs->detect_zeroes;

    if (blk && blk_get_public(blk)->throttle_group_member.throttle_state) {
        ThrottleConfig cfg;
        BlockBackendPublic *blkp = blk_get_public(blk);

        throttle_group_get_config(&blkp->throttle_group_member, &cfg);

        info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
        info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
        info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg;

        info->iops = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
        info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
        info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;

        info->has_bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->has_bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->has_bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
        info->bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;

        info->has_iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->has_iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->has_iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
        info->iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;

        info->has_bps_max_length = info->has_bps_max;
        info->bps_max_length =
            cfg.buckets[THROTTLE_BPS_TOTAL].burst_length;
        info->has_bps_rd_max_length = info->has_bps_rd_max;
        info->bps_rd_max_length =
            cfg.buckets[THROTTLE_BPS_READ].burst_length;
        info->has_bps_wr_max_length = info->has_bps_wr_max;
        info->bps_wr_max_length =
            cfg.buckets[THROTTLE_BPS_WRITE].burst_length;

        info->has_iops_max_length = info->has_iops_max;
        info->iops_max_length =
            cfg.buckets[THROTTLE_OPS_TOTAL].burst_length;
        info->has_iops_rd_max_length = info->has_iops_rd_max;
        info->iops_rd_max_length =
            cfg.buckets[THROTTLE_OPS_READ].burst_length;
        info->has_iops_wr_max_length = info->has_iops_wr_max;
        info->iops_wr_max_length =
            cfg.buckets[THROTTLE_OPS_WRITE].burst_length;

        info->has_iops_size = cfg.op_size;
        info->iops_size = cfg.op_size;

        info->has_group = true;
        info->group =
            g_strdup(throttle_group_get_name(&blkp->throttle_group_member));
    }

    info->write_threshold = bdrv_write_threshold_get(bs);

    bs0 = bs;
    p_image_info = &info->image;
    info->backing_file_depth = 0;
    while (1) {
        Error *local_err = NULL;
        bdrv_query_image_info(bs0, p_image_info, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            qapi_free_BlockDeviceInfo(info);
            return NULL;
        }

        /* stop gathering data for flat output */
        if (flat) {
            break;
        }

        if (bs0->drv && bdrv_filter_or_cow_child(bs0)) {
            /*
             * Put any filtered child here (for backwards compatibility to when
             * we put bs0->backing here, which might be any filtered child).
             */
            info->backing_file_depth++;
            bs0 = bdrv_filter_or_cow_bs(bs0);
            (*p_image_info)->has_backing_image = true;
            p_image_info = &((*p_image_info)->backing_image);
        } else {
            break;
        }

        /* Skip automatically inserted nodes that the user isn't aware of for
         * query-block (blk != NULL), but not for query-named-block-nodes */
        if (blk) {
            bs0 = bdrv_skip_implicit_filters(bs0);
        }
    }

    return info;
}
/*
 * Returns 0 on success, with *p_list either set to describe snapshot
 * information, or NULL because there are no snapshots. Returns -errno on
 * error, with *p_list untouched.
 */
int bdrv_query_snapshot_info_list(BlockDriverState *bs,
                                  SnapshotInfoList **p_list,
                                  Error **errp)
{
    int i, sn_count;
    QEMUSnapshotInfo *sn_tab = NULL;
    SnapshotInfoList *info_list, *cur_item = NULL, *head = NULL;
    SnapshotInfo *info;

    sn_count = bdrv_snapshot_list(bs, &sn_tab);
    if (sn_count < 0) {
        const char *dev = bdrv_get_device_name(bs);
        switch (sn_count) {
        case -ENOMEDIUM:
            error_setg(errp, "Device '%s' is not inserted", dev);
            break;
        case -ENOTSUP:
            error_setg(errp,
                       "Device '%s' does not support internal snapshots",
                       dev);
            break;
        default:
            error_setg_errno(errp, -sn_count,
                             "Can't list snapshots of device '%s'", dev);
            break;
        }
        return sn_count;
    }

    for (i = 0; i < sn_count; i++) {
        info = g_new0(SnapshotInfo, 1);
        info->id = g_strdup(sn_tab[i].id_str);
        info->name = g_strdup(sn_tab[i].name);
        info->vm_state_size = sn_tab[i].vm_state_size;
        info->date_sec = sn_tab[i].date_sec;
        info->date_nsec = sn_tab[i].date_nsec;
        info->vm_clock_sec = sn_tab[i].vm_clock_nsec / 1000000000;
        info->vm_clock_nsec = sn_tab[i].vm_clock_nsec % 1000000000;
        info->icount = sn_tab[i].icount;
        info->has_icount = sn_tab[i].icount != -1ULL;

        info_list = g_new0(SnapshotInfoList, 1);
        info_list->value = info;

        /* XXX: waiting for the qapi to support qemu-queue.h types */
        if (!cur_item) {
            head = cur_item = info_list;
        } else {
            cur_item->next = info_list;
            cur_item = info_list;
        }
    }

    g_free(sn_tab);
    *p_list = head;
    return 0;
}
/**
 * bdrv_query_image_info:
 * @bs: block device to examine
 * @p_info: location to store image information
 * @errp: location to store error information
 *
 * Store "flat" image information in @p_info.
 *
 * "Flat" means it does *not* query backing image information,
 * i.e. (*pinfo)->has_backing_image will be set to false and
 * (*pinfo)->backing_image to NULL even when the image does in fact have
 * a backing image.
 *
 * @p_info will be set only on success. On error, store error in @errp.
 */
void bdrv_query_image_info(BlockDriverState *bs,
                           ImageInfo **p_info,
                           Error **errp)
{
    int64_t size;
    const char *backing_filename;
    BlockDriverInfo bdi;
    int ret;
    Error *err = NULL;
    ImageInfo *info;

    aio_context_acquire(bdrv_get_aio_context(bs));

    size = bdrv_getlength(bs);
    if (size < 0) {
        error_setg_errno(errp, -size, "Can't get image size '%s'",
                         bs->exact_filename);
        goto out;
    }

    bdrv_refresh_filename(bs);

    info = g_new0(ImageInfo, 1);
    info->filename = g_strdup(bs->filename);
    info->format = g_strdup(bdrv_get_format_name(bs));
    info->virtual_size = size;
    info->actual_size = bdrv_get_allocated_file_size(bs);
    info->has_actual_size = info->actual_size >= 0;
    if (bs->encrypted) {
        info->encrypted = true;
        info->has_encrypted = true;
    }
    if (bdrv_get_info(bs, &bdi) >= 0) {
        if (bdi.cluster_size != 0) {
            info->cluster_size = bdi.cluster_size;
            info->has_cluster_size = true;
        }
        info->dirty_flag = bdi.is_dirty;
        info->has_dirty_flag = true;
    }
    info->format_specific = bdrv_get_specific_info(bs, &err);
    if (err) {
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }
    info->has_format_specific = info->format_specific != NULL;

    backing_filename = bs->backing_file;
    if (backing_filename[0] != '\0') {
        char *backing_filename2;

        info->backing_filename = g_strdup(backing_filename);
        info->has_backing_filename = true;
        backing_filename2 = bdrv_get_full_backing_filename(bs, NULL);

        /* Always report the full_backing_filename if present, even if it's the
         * same as backing_filename. That they are same is useful info. */
        if (backing_filename2) {
            info->full_backing_filename = g_strdup(backing_filename2);
            info->has_full_backing_filename = true;
        }

        if (bs->backing_format[0]) {
            info->backing_filename_format = g_strdup(bs->backing_format);
            info->has_backing_filename_format = true;
        }
        g_free(backing_filename2);
    }

    ret = bdrv_query_snapshot_info_list(bs, &info->snapshots, &err);
    switch (ret) {
    case 0:
        if (info->snapshots) {
            info->has_snapshots = true;
        }
        break;
    /* recoverable error */
    case -ENOMEDIUM:
    case -ENOTSUP:
        error_free(err);
        break;
    default:
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }

    *p_info = info;

out:
    aio_context_release(bdrv_get_aio_context(bs));
}
/* @p_info will be set only on success. */
static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
                            Error **errp)
{
    BlockInfo *info = g_malloc0(sizeof(*info));
    BlockDriverState *bs = blk_bs(blk);
    char *qdev;

    /* Skip automatically inserted nodes that the user isn't aware of */
    bs = bdrv_skip_implicit_filters(bs);

    info->device = g_strdup(blk_name(blk));
    info->type = g_strdup("unknown");
    info->locked = blk_dev_is_medium_locked(blk);
    info->removable = blk_dev_has_removable_media(blk);

    qdev = blk_get_attached_dev_id(blk);
    if (qdev && *qdev) {
        info->has_qdev = true;
        info->qdev = qdev;
    } else {
        g_free(qdev);
    }

    if (blk_dev_has_tray(blk)) {
        info->has_tray_open = true;
        info->tray_open = blk_dev_is_tray_open(blk);
    }

    if (blk_iostatus_is_enabled(blk)) {
        info->has_io_status = true;
        info->io_status = blk_iostatus(blk);
    }

    if (bs && !QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    if (bs && bs->drv) {
        info->has_inserted = true;
        info->inserted = bdrv_block_device_info(blk, bs, false, errp);
        if (info->inserted == NULL) {
            goto err;
        }
    }

    *p_info = info;
    return;

 err:
    qapi_free_BlockInfo(info);
}
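
/*
 * Convert a plain array of @size uint64_t values into a QAPI uint64List,
 * preserving order.  The caller owns the returned list.
 */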
static uint64List *uint64_list(uint64_t *list, int size)
{
    int i;
    uint64List *out_list = NULL;
    uint64List **pout_list = &out_list;

    for (i = 0; i < size; i++) {
        uint64List *entry = g_new(uint64List, 1);
        entry->value = list[i];
        *pout_list = entry;
        pout_list = &entry->next;
    }

    *pout_list = NULL;

    return out_list;
}
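
/*
 * Fill *@info with a QAPI view of @hist if the histogram has been set up
 * (i.e. it has bins); *@not_null tells the caller whether *@info was
 * populated.  A histogram with N bins has N - 1 interval boundaries.
 */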
static void bdrv_latency_histogram_stats(BlockLatencyHistogram *hist,
                                         bool *not_null,
                                         BlockLatencyHistogramInfo **info)
{
    *not_null = hist->bins != NULL;
    if (*not_null) {
        *info = g_new0(BlockLatencyHistogramInfo, 1);

        (*info)->boundaries = uint64_list(hist->boundaries, hist->nbins - 1);
        (*info)->bins = uint64_list(hist->bins, hist->nbins);
    }
}
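
/*
 * Fill @ds with the BlockBackend-level accounting data of @blk: byte and
 * operation counters, failed/invalid operation counters, cumulative
 * latencies, per-interval timed statistics and, where configured,
 * latency histograms.
 */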
static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
{
    BlockAcctStats *stats = blk_get_stats(blk);
    BlockAcctTimedStats *ts = NULL;

    ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
    ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
    ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP];
    ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
    ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];
    ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP];

    ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
    ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
    ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];
    ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP];

    ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
    ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
    ds->invalid_flush_operations =
        stats->invalid_ops[BLOCK_ACCT_FLUSH];
    ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP];

    ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
    ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
    ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP];
    ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
    ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
    ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
    ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];
    ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP];

    ds->has_idle_time_ns = stats->last_access_time_ns > 0;
    if (ds->has_idle_time_ns) {
        ds->idle_time_ns = block_acct_idle_time_ns(stats);
    }

    ds->account_invalid = stats->account_invalid;
    ds->account_failed = stats->account_failed;

    while ((ts = block_acct_interval_next(stats, ts))) {
        BlockDeviceTimedStatsList *timed_stats =
            g_malloc0(sizeof(*timed_stats));
        BlockDeviceTimedStats *dev_stats = g_malloc0(sizeof(*dev_stats));
        timed_stats->next = ds->timed_stats;
        timed_stats->value = dev_stats;
        ds->timed_stats = timed_stats;

        TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ];
        TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE];
        TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH];

        dev_stats->interval_length = ts->interval_length;

        dev_stats->min_rd_latency_ns = timed_average_min(rd);
        dev_stats->max_rd_latency_ns = timed_average_max(rd);
        dev_stats->avg_rd_latency_ns = timed_average_avg(rd);

        dev_stats->min_wr_latency_ns = timed_average_min(wr);
        dev_stats->max_wr_latency_ns = timed_average_max(wr);
        dev_stats->avg_wr_latency_ns = timed_average_avg(wr);

        dev_stats->min_flush_latency_ns = timed_average_min(fl);
        dev_stats->max_flush_latency_ns = timed_average_max(fl);
        dev_stats->avg_flush_latency_ns = timed_average_avg(fl);

        dev_stats->avg_rd_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_READ);
        dev_stats->avg_wr_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_WRITE);
    }

    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_READ],
                                 &ds->has_rd_latency_histogram,
                                 &ds->rd_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_WRITE],
                                 &ds->has_wr_latency_histogram,
                                 &ds->wr_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_FLUSH],
                                 &ds->has_flush_latency_histogram,
                                 &ds->flush_latency_histogram);
}
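
/*
 * Build a BlockStats tree for @bs.  @blk_level distinguishes a
 * BlockBackend-level query (which skips implicit filter nodes and reports
 * the filtered/COW child under "backing") from a node-level query.
 * Recurses into the primary or unique data-storing child, which is
 * reported as "parent", when one can be identified unambiguously.
 */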
static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
                                        bool blk_level)
{
    BdrvChild *parent_child;
    BlockDriverState *filter_or_cow_bs;
    BlockStats *s = NULL;

    s = g_malloc0(sizeof(*s));
    s->stats = g_malloc0(sizeof(*s->stats));

    if (!bs) {
        return s;
    }

    /* Skip automatically inserted nodes that the user isn't aware of in
     * a BlockBackend-level command. Stay at the exact node for a node-level
     * command. */
    if (blk_level) {
        bs = bdrv_skip_implicit_filters(bs);
    }

    if (bdrv_get_node_name(bs)[0]) {
        s->has_node_name = true;
        s->node_name = g_strdup(bdrv_get_node_name(bs));
    }

    s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset);

    s->driver_specific = bdrv_get_specific_stats(bs);
    if (s->driver_specific) {
        s->has_driver_specific = true;
    }

    parent_child = bdrv_primary_child(bs);
    if (!parent_child ||
        !(parent_child->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED)))
    {
        BdrvChild *c;

        /*
         * Look for a unique data-storing child. We do not need to look for
         * filtered children, as there would be only one and it would have been
         * the primary child.
         */
        parent_child = NULL;
        QLIST_FOREACH(c, &bs->children, next) {
            if (c->role & BDRV_CHILD_DATA) {
                if (parent_child) {
                    /*
                     * There are multiple data-storing children and we cannot
                     * choose between them.
                     */
                    parent_child = NULL;
                    break;
                }
                parent_child = c;
            }
        }
    }
    if (parent_child) {
        s->has_parent = true;
        s->parent = bdrv_query_bds_stats(parent_child->bs, blk_level);
    }

    filter_or_cow_bs = bdrv_filter_or_cow_bs(bs);
    if (blk_level && filter_or_cow_bs) {
        /*
         * Put any filtered or COW child here (for backwards
         * compatibility to when we put bs0->backing here, which might
         * be either)
         */
        s->has_backing = true;
        s->backing = bdrv_query_bds_stats(filter_or_cow_bs, blk_level);
    }

    return s;
}
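
/*
 * QMP query-block: returns one BlockInfo per BlockBackend that is either
 * named or attached to a device, or NULL with @errp set on failure.
 */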
BlockInfoList *qmp_query_block(Error **errp)
{
    BlockInfoList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    Error *local_err = NULL;

    for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
        BlockInfoList *info;

        if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
            continue;
        }

        info = g_malloc0(sizeof(*info));
        bdrv_query_info(blk, &info->value, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            g_free(info);
            qapi_free_BlockInfoList(head);
            return NULL;
        }

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}
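
/*
 * QMP query-blockstats: with query-nodes=true, statistics are gathered
 * for every named node; otherwise one entry is produced per named or
 * attached BlockBackend, including the backend-level counters.
 */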
BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                                     bool query_nodes,
                                     Error **errp)
{
    BlockStatsList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    BlockDriverState *bs;

    /* Just to be safe if query_nodes is not always initialized */
    if (has_query_nodes && query_nodes) {
        for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) {
            BlockStatsList *info = g_malloc0(sizeof(*info));
            AioContext *ctx = bdrv_get_aio_context(bs);

            aio_context_acquire(ctx);
            info->value = bdrv_query_bds_stats(bs, false);
            aio_context_release(ctx);

            *p_next = info;
            p_next = &info->next;
        }
    } else {
        for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
            BlockStatsList *info;
            AioContext *ctx = blk_get_aio_context(blk);
            BlockStats *s;
            char *qdev;

            if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
                continue;
            }

            aio_context_acquire(ctx);
            s = bdrv_query_bds_stats(blk_bs(blk), true);
            s->has_device = true;
            s->device = g_strdup(blk_name(blk));

            qdev = blk_get_attached_dev_id(blk);
            if (qdev && *qdev) {
                s->has_qdev = true;
                s->qdev = qdev;
            } else {
                g_free(qdev);
            }

            bdrv_query_blk_stats(s->stats, blk);
            aio_context_release(ctx);

            info = g_malloc0(sizeof(*info));
            info->value = s;
            *p_next = info;
            p_next = &info->next;
        }
    }

    return head;
}
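
/*
 * Print the table header (when @sn is NULL) or one row describing @sn,
 * using the fixed-width column layout of snapshot listings.  No trailing
 * newline is printed.
 */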
void bdrv_snapshot_dump(QEMUSnapshotInfo *sn)
{
    char date_buf[128], clock_buf[128];
    char icount_buf[128] = {0};
    struct tm tm;
    time_t ti;
    int64_t secs;
    char *sizing = NULL;

    if (!sn) {
        qemu_printf("%-10s%-18s%7s%20s%13s%11s",
                    "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK", "ICOUNT");
    } else {
        ti = sn->date_sec;
        localtime_r(&ti, &tm);
        strftime(date_buf, sizeof(date_buf),
                 "%Y-%m-%d %H:%M:%S", &tm);
        secs = sn->vm_clock_nsec / 1000000000;
        snprintf(clock_buf, sizeof(clock_buf),
                 "%02d:%02d:%02d.%03d",
                 (int)(secs / 3600),
                 (int)((secs / 60) % 60),
                 (int)(secs % 60),
                 (int)((sn->vm_clock_nsec / 1000000) % 1000));
        sizing = size_to_str(sn->vm_state_size);
        if (sn->icount != -1ULL) {
            snprintf(icount_buf, sizeof(icount_buf),
                     "%"PRId64, sn->icount);
        }
        qemu_printf("%-9s %-17s %7s%20s%13s%11s",
                    sn->id_str, sn->name,
                    sizing,
                    date_buf,
                    clock_buf,
                    icount_buf);
    }
    g_free(sizing);
}

static void dump_qdict(int indentation, QDict *dict);
static void dump_qlist(int indentation, QList *list);
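
/*
 * Recursively pretty-print @obj for human consumption: scalars are printed
 * in place, while dicts and lists are printed one entry per line, indented
 * by four spaces per nesting level (see dump_qdict() and dump_qlist()).
 */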
static void dump_qobject(int comp_indent, QObject *obj)
{
    switch (qobject_type(obj)) {
    case QTYPE_QNUM: {
        QNum *value = qobject_to(QNum, obj);
        char *tmp = qnum_to_string(value);
        qemu_printf("%s", tmp);
        g_free(tmp);
        break;
    }
    case QTYPE_QSTRING: {
        QString *value = qobject_to(QString, obj);
        qemu_printf("%s", qstring_get_str(value));
        break;
    }
    case QTYPE_QDICT: {
        QDict *value = qobject_to(QDict, obj);
        dump_qdict(comp_indent, value);
        break;
    }
    case QTYPE_QLIST: {
        QList *value = qobject_to(QList, obj);
        dump_qlist(comp_indent, value);
        break;
    }
    case QTYPE_QBOOL: {
        QBool *value = qobject_to(QBool, obj);
        qemu_printf("%s", qbool_get_bool(value) ? "true" : "false");
        break;
    }
    default:
        abort();
    }
}

static void dump_qlist(int indentation, QList *list)
{
    const QListEntry *entry;
    int i = 0;

    for (entry = qlist_first(list); entry; entry = qlist_next(entry), i++) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        qemu_printf("%*s[%i]:%c", indentation * 4, "", i,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
    }
}

static void dump_qdict(int indentation, QDict *dict)
{
    const QDictEntry *entry;

    for (entry = qdict_first(dict); entry; entry = qdict_next(dict, entry)) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        char *key = g_malloc(strlen(entry->key) + 1);
        int i;

        /* replace dashes with spaces in key (variable) names */
        for (i = 0; entry->key[i]; i++) {
            key[i] = entry->key[i] == '-' ? ' ' : entry->key[i];
        }
        key[i] = 0;
        qemu_printf("%*s%s:%c", indentation * 4, "", key,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
        g_free(key);
    }
}
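
/*
 * Pretty-print the driver-specific part of an ImageInfo.  The
 * ImageInfoSpecific union is converted to a QObject with the QAPI output
 * visitor and only its "data" member is dumped, so the discriminator
 * ("type") is not repeated in the output.
 */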
void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec)
{
    QObject *obj, *data;
    Visitor *v = qobject_output_visitor_new(&obj);

    visit_type_ImageInfoSpecific(v, NULL, &info_spec, &error_abort);
    visit_complete(v, &obj);
    data = qdict_get(qobject_to(QDict, obj), "data");
    dump_qobject(1, data);
    qobject_unref(obj);
    visit_free(v);
}
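
/*
 * Print a human-readable description of @info in the style of
 * "qemu-img info": filename, format and sizes first, then optional
 * fields (encryption, cluster size, dirty flag, backing file, snapshot
 * list, format specific information) only when they are present.
 */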
void bdrv_image_info_dump(ImageInfo *info)
{
    char *size_buf, *dsize_buf;
    if (!info->has_actual_size) {
        dsize_buf = g_strdup("unavailable");
    } else {
        dsize_buf = size_to_str(info->actual_size);
    }
    size_buf = size_to_str(info->virtual_size);
    qemu_printf("image: %s\n"
                "file format: %s\n"
                "virtual size: %s (%" PRId64 " bytes)\n"
                "disk size: %s\n",
                info->filename, info->format, size_buf,
                info->virtual_size,
                dsize_buf);
    g_free(size_buf);
    g_free(dsize_buf);

    if (info->has_encrypted && info->encrypted) {
        qemu_printf("encrypted: yes\n");
    }

    if (info->has_cluster_size) {
        qemu_printf("cluster_size: %" PRId64 "\n",
                    info->cluster_size);
    }

    if (info->has_dirty_flag && info->dirty_flag) {
        qemu_printf("cleanly shut down: no\n");
    }

    if (info->has_backing_filename) {
        qemu_printf("backing file: %s", info->backing_filename);
        if (!info->has_full_backing_filename) {
            qemu_printf(" (cannot determine actual path)");
        } else if (strcmp(info->backing_filename,
                          info->full_backing_filename) != 0) {
            qemu_printf(" (actual path: %s)", info->full_backing_filename);
        }
        qemu_printf("\n");
        if (info->has_backing_filename_format) {
            qemu_printf("backing file format: %s\n",
                        info->backing_filename_format);
        }
    }

    if (info->has_snapshots) {
        SnapshotInfoList *elem;

        qemu_printf("Snapshot list:\n");
        bdrv_snapshot_dump(NULL);
        qemu_printf("\n");

        /* Ideally bdrv_snapshot_dump() would operate on SnapshotInfoList but
         * we convert to the block layer's native QEMUSnapshotInfo for now.
         */
        for (elem = info->snapshots; elem; elem = elem->next) {
            QEMUSnapshotInfo sn = {
                .vm_state_size = elem->value->vm_state_size,
                .date_sec = elem->value->date_sec,
                .date_nsec = elem->value->date_nsec,
                .vm_clock_nsec = elem->value->vm_clock_sec * 1000000000ULL +
                                 elem->value->vm_clock_nsec,
                .icount = elem->value->has_icount ?
                          elem->value->icount : -1ULL,
            };

            pstrcpy(sn.id_str, sizeof(sn.id_str), elem->value->id);
            pstrcpy(sn.name, sizeof(sn.name), elem->value->name);
            bdrv_snapshot_dump(&sn);
            qemu_printf("\n");
        }
    }

    if (info->has_format_specific) {
        qemu_printf("Format specific information:\n");
        bdrv_image_info_specific_dump(info->format_specific);