qemu.git: block/qapi.c
/*
 * Block layer qmp and info dump related functions
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "block/qapi.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
#include "block/throttle-groups.h"
#include "block/write-threshold.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-block-core.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qmp/qstring.h"
#include "qemu/qemu-print.h"
#include "sysemu/block-backend.h"
BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
                                        BlockDriverState *bs,
                                        bool flat,
                                        Error **errp)
{
    ImageInfo **p_image_info;
    BlockDriverState *bs0, *backing;
    BlockDeviceInfo *info;

    if (!bs->drv) {
        error_setg(errp, "Block device %s is ejected", bs->node_name);
        return NULL;
    }

    bdrv_refresh_filename(bs);

    info = g_malloc0(sizeof(*info));
    info->file = g_strdup(bs->filename);
    info->ro = bdrv_is_read_only(bs);
    info->drv = g_strdup(bs->drv->format_name);
    info->encrypted = bs->encrypted;

    info->cache = g_new(BlockdevCacheInfo, 1);
    *info->cache = (BlockdevCacheInfo) {
        .writeback = blk ? blk_enable_write_cache(blk) : true,
        .direct    = !!(bs->open_flags & BDRV_O_NOCACHE),
        .no_flush  = !!(bs->open_flags & BDRV_O_NO_FLUSH),
    };

    if (bs->node_name[0]) {
        info->node_name = g_strdup(bs->node_name);
    }

    backing = bdrv_cow_bs(bs);
    if (backing) {
        info->backing_file = g_strdup(backing->filename);
    }

    if (!QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    info->detect_zeroes = bs->detect_zeroes;

    if (blk && blk_get_public(blk)->throttle_group_member.throttle_state) {
        ThrottleConfig cfg;
        BlockBackendPublic *blkp = blk_get_public(blk);

        throttle_group_get_config(&blkp->throttle_group_member, &cfg);

        info->bps     = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
        info->bps_rd  = cfg.buckets[THROTTLE_BPS_READ].avg;
        info->bps_wr  = cfg.buckets[THROTTLE_BPS_WRITE].avg;

        info->iops    = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
        info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
        info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;

        info->has_bps_max     = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->bps_max         = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->has_bps_rd_max  = cfg.buckets[THROTTLE_BPS_READ].max;
        info->bps_rd_max      = cfg.buckets[THROTTLE_BPS_READ].max;
        info->has_bps_wr_max  = cfg.buckets[THROTTLE_BPS_WRITE].max;
        info->bps_wr_max      = cfg.buckets[THROTTLE_BPS_WRITE].max;

        info->has_iops_max    = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->iops_max        = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->has_iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->iops_rd_max     = cfg.buckets[THROTTLE_OPS_READ].max;
        info->has_iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
        info->iops_wr_max     = cfg.buckets[THROTTLE_OPS_WRITE].max;

        info->has_bps_max_length     = info->has_bps_max;
        info->bps_max_length         =
            cfg.buckets[THROTTLE_BPS_TOTAL].burst_length;
        info->has_bps_rd_max_length  = info->has_bps_rd_max;
        info->bps_rd_max_length      =
            cfg.buckets[THROTTLE_BPS_READ].burst_length;
        info->has_bps_wr_max_length  = info->has_bps_wr_max;
        info->bps_wr_max_length      =
            cfg.buckets[THROTTLE_BPS_WRITE].burst_length;

        info->has_iops_max_length    = info->has_iops_max;
        info->iops_max_length        =
            cfg.buckets[THROTTLE_OPS_TOTAL].burst_length;
        info->has_iops_rd_max_length = info->has_iops_rd_max;
        info->iops_rd_max_length     =
            cfg.buckets[THROTTLE_OPS_READ].burst_length;
        info->has_iops_wr_max_length = info->has_iops_wr_max;
        info->iops_wr_max_length     =
            cfg.buckets[THROTTLE_OPS_WRITE].burst_length;

        info->has_iops_size = cfg.op_size;
        info->iops_size = cfg.op_size;

        info->group =
            g_strdup(throttle_group_get_name(&blkp->throttle_group_member));
    }

    info->write_threshold = bdrv_write_threshold_get(bs);

    bs0 = bs;
    p_image_info = &info->image;
    info->backing_file_depth = 0;
    while (1) {
        Error *local_err = NULL;
        bdrv_query_image_info(bs0, p_image_info, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            qapi_free_BlockDeviceInfo(info);
            return NULL;
        }

        /* stop gathering data for flat output */
        if (flat) {
            break;
        }

        if (bs0->drv && bdrv_filter_or_cow_child(bs0)) {
            /*
             * Put any filtered child here (for backwards compatibility to when
             * we put bs0->backing here, which might be any filtered child).
             */
            info->backing_file_depth++;
            bs0 = bdrv_filter_or_cow_bs(bs0);
            p_image_info = &((*p_image_info)->backing_image);
        } else {
            break;
        }

        /* Skip automatically inserted nodes that the user isn't aware of for
         * query-block (blk != NULL), but not for query-named-block-nodes */
        if (blk) {
            bs0 = bdrv_skip_implicit_filters(bs0);
        }
    }

    return info;
}
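
/*
 * Illustrative caller sketch, not part of the original file: a minimal way
 * to build and release a BlockDeviceInfo for a named node, assuming a node
 * lookup via bdrv_lookup_bs() and no owning BlockBackend (blk == NULL):
 *
 *     BlockDriverState *bs = bdrv_lookup_bs(NULL, "node0", errp);
 *     if (bs) {
 *         BlockDeviceInfo *info = bdrv_block_device_info(NULL, bs, false, errp);
 *         if (info) {
 *             // inspect info->ro, info->image, info->backing_file_depth, ...
 *             qapi_free_BlockDeviceInfo(info);
 *         }
 *     }
 */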
/**
 * Returns 0 on success, with *p_list either set to describe snapshot
 * information, or NULL because there are no snapshots.  Returns -errno on
 * error, with *p_list untouched.
 */
int bdrv_query_snapshot_info_list(BlockDriverState *bs,
                                  SnapshotInfoList **p_list,
                                  Error **errp)
{
    int i, sn_count;
    QEMUSnapshotInfo *sn_tab = NULL;
    SnapshotInfoList *head = NULL, **tail = &head;
    SnapshotInfo *info;

    sn_count = bdrv_snapshot_list(bs, &sn_tab);
    if (sn_count < 0) {
        const char *dev = bdrv_get_device_name(bs);
        switch (sn_count) {
        case -ENOMEDIUM:
            error_setg(errp, "Device '%s' is not inserted", dev);
            break;
        case -ENOTSUP:
            error_setg(errp,
                       "Device '%s' does not support internal snapshots",
                       dev);
            break;
        default:
            error_setg_errno(errp, -sn_count,
                             "Can't list snapshots of device '%s'", dev);
            break;
        }
        return sn_count;
    }

    for (i = 0; i < sn_count; i++) {
        info = g_new0(SnapshotInfo, 1);
        info->id            = g_strdup(sn_tab[i].id_str);
        info->name          = g_strdup(sn_tab[i].name);
        info->vm_state_size = sn_tab[i].vm_state_size;
        info->date_sec      = sn_tab[i].date_sec;
        info->date_nsec     = sn_tab[i].date_nsec;
        info->vm_clock_sec  = sn_tab[i].vm_clock_nsec / 1000000000;
        info->vm_clock_nsec = sn_tab[i].vm_clock_nsec % 1000000000;
        info->icount        = sn_tab[i].icount;
        info->has_icount    = sn_tab[i].icount != -1ULL;

        QAPI_LIST_APPEND(tail, info);
    }

    g_free(sn_tab);
    *p_list = head;
    return 0;
}
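
/*
 * Usage sketch (illustrative, not from the original file): on success the
 * function returns 0 and sets *p_list (NULL when there are no snapshots);
 * on failure it returns -errno and leaves *p_list untouched.  Assuming the
 * QAPI-generated qapi_free_SnapshotInfoList() cleanup helper:
 *
 *     SnapshotInfoList *list = NULL;
 *     if (bdrv_query_snapshot_info_list(bs, &list, errp) == 0) {
 *         for (SnapshotInfoList *e = list; e; e = e->next) {
 *             // e->value->id, e->value->name, e->value->vm_state_size, ...
 *         }
 *         qapi_free_SnapshotInfoList(list);
 *     }
 */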
/**
 * bdrv_query_image_info:
 * @bs: block device to examine
 * @p_info: location to store image information
 * @errp: location to store error information
 *
 * Store "flat" image information in @p_info.
 *
 * "Flat" means it does *not* query backing image information,
 * i.e. (*pinfo)->has_backing_image will be set to false and
 * (*pinfo)->backing_image to NULL even when the image does in fact have
 * a backing image.
 *
 * @p_info will be set only on success. On error, store error in @errp.
 */
void bdrv_query_image_info(BlockDriverState *bs,
                           ImageInfo **p_info,
                           Error **errp)
{
    int64_t size;
    const char *backing_filename;
    BlockDriverInfo bdi;
    int ret;
    Error *err = NULL;
    ImageInfo *info;

    aio_context_acquire(bdrv_get_aio_context(bs));

    size = bdrv_getlength(bs);
    if (size < 0) {
        error_setg_errno(errp, -size, "Can't get image size '%s'",
                         bs->exact_filename);
        goto out;
    }

    bdrv_refresh_filename(bs);

    info = g_new0(ImageInfo, 1);
    info->filename        = g_strdup(bs->filename);
    info->format          = g_strdup(bdrv_get_format_name(bs));
    info->virtual_size    = size;
    info->actual_size     = bdrv_get_allocated_file_size(bs);
    info->has_actual_size = info->actual_size >= 0;
    if (bs->encrypted) {
        info->encrypted = true;
        info->has_encrypted = true;
    }
    if (bdrv_get_info(bs, &bdi) >= 0) {
        if (bdi.cluster_size != 0) {
            info->cluster_size = bdi.cluster_size;
            info->has_cluster_size = true;
        }
        info->dirty_flag = bdi.is_dirty;
        info->has_dirty_flag = true;
    }
    info->format_specific = bdrv_get_specific_info(bs, &err);
    if (err) {
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }
    backing_filename = bs->backing_file;
    if (backing_filename[0] != '\0') {
        char *backing_filename2;

        info->backing_filename = g_strdup(backing_filename);
        backing_filename2 = bdrv_get_full_backing_filename(bs, NULL);

        /* Always report the full_backing_filename if present, even if it's the
         * same as backing_filename. That they are same is useful info. */
        if (backing_filename2) {
            info->full_backing_filename = g_strdup(backing_filename2);
        }

        if (bs->backing_format[0]) {
            info->backing_filename_format = g_strdup(bs->backing_format);
        }
        g_free(backing_filename2);
    }

    ret = bdrv_query_snapshot_info_list(bs, &info->snapshots, &err);
    switch (ret) {
    case 0:
        if (info->snapshots) {
            info->has_snapshots = true;
        }
        break;
    /* recoverable error */
    case -ENOMEDIUM:
    case -ENOTSUP:
        error_free(err);
        break;
    default:
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }

    *p_info = info;

out:
    aio_context_release(bdrv_get_aio_context(bs));
}
/* @p_info will be set only on success. */
static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
                            Error **errp)
{
    BlockInfo *info = g_malloc0(sizeof(*info));
    BlockDriverState *bs = blk_bs(blk);
    char *qdev;

    /* Skip automatically inserted nodes that the user isn't aware of */
    bs = bdrv_skip_implicit_filters(bs);

    info->device = g_strdup(blk_name(blk));
    info->type = g_strdup("unknown");
    info->locked = blk_dev_is_medium_locked(blk);
    info->removable = blk_dev_has_removable_media(blk);

    qdev = blk_get_attached_dev_id(blk);
    if (qdev && *qdev) {
        info->qdev = qdev;
    } else {
        g_free(qdev);
    }

    if (blk_dev_has_tray(blk)) {
        info->has_tray_open = true;
        info->tray_open = blk_dev_is_tray_open(blk);
    }

    if (blk_iostatus_is_enabled(blk)) {
        info->has_io_status = true;
        info->io_status = blk_iostatus(blk);
    }

    if (bs && bs->drv) {
        info->inserted = bdrv_block_device_info(blk, bs, false, errp);
        if (info->inserted == NULL) {
            goto err;
        }
    }

    *p_info = info;
    return;

 err:
    qapi_free_BlockInfo(info);
}
static uint64List *uint64_list(uint64_t *list, int size)
{
    int i;
    uint64List *out_list = NULL;
    uint64List **tail = &out_list;

    for (i = 0; i < size; i++) {
        QAPI_LIST_APPEND(tail, list[i]);
    }

    return out_list;
}
static BlockLatencyHistogramInfo *
bdrv_latency_histogram_stats(BlockLatencyHistogram *hist)
{
    BlockLatencyHistogramInfo *info;

    if (!hist->bins) {
        return NULL;
    }

    info = g_new0(BlockLatencyHistogramInfo, 1);
    info->boundaries = uint64_list(hist->boundaries, hist->nbins - 1);
    info->bins = uint64_list(hist->bins, hist->nbins);
    return info;
}
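
/*
 * Worked example (illustrative): with hist->nbins == 4 and boundaries
 * {10, 50, 100} (nbins - 1 entries), the four bins returned above count
 * requests whose latency falls in [0, 10), [10, 50), [50, 100) and
 * [100, +inf) respectively, following the usual histogram convention.
 */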
static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
{
    BlockAcctStats *stats = blk_get_stats(blk);
    BlockAcctTimedStats *ts = NULL;
    BlockLatencyHistogram *hgram;

    ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
    ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
    ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP];
    ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
    ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];
    ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP];

    ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
    ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
    ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];
    ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP];

    ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
    ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
    ds->invalid_flush_operations =
        stats->invalid_ops[BLOCK_ACCT_FLUSH];
    ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP];

    ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
    ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
    ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP];
    ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
    ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
    ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
    ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];
    ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP];

    ds->has_idle_time_ns = stats->last_access_time_ns > 0;
    if (ds->has_idle_time_ns) {
        ds->idle_time_ns = block_acct_idle_time_ns(stats);
    }

    ds->account_invalid = stats->account_invalid;
    ds->account_failed = stats->account_failed;

    while ((ts = block_acct_interval_next(stats, ts))) {
        BlockDeviceTimedStats *dev_stats = g_malloc0(sizeof(*dev_stats));

        TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ];
        TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE];
        TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH];

        dev_stats->interval_length = ts->interval_length;

        dev_stats->min_rd_latency_ns = timed_average_min(rd);
        dev_stats->max_rd_latency_ns = timed_average_max(rd);
        dev_stats->avg_rd_latency_ns = timed_average_avg(rd);

        dev_stats->min_wr_latency_ns = timed_average_min(wr);
        dev_stats->max_wr_latency_ns = timed_average_max(wr);
        dev_stats->avg_wr_latency_ns = timed_average_avg(wr);

        dev_stats->min_flush_latency_ns = timed_average_min(fl);
        dev_stats->max_flush_latency_ns = timed_average_max(fl);
        dev_stats->avg_flush_latency_ns = timed_average_avg(fl);

        dev_stats->avg_rd_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_READ);
        dev_stats->avg_wr_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_WRITE);

        QAPI_LIST_PREPEND(ds->timed_stats, dev_stats);
    }

    hgram = stats->latency_histogram;
    ds->rd_latency_histogram
        = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_READ]);
    ds->wr_latency_histogram
        = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_WRITE]);
    ds->flush_latency_histogram
        = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_FLUSH]);
}
static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
                                        bool blk_level)
{
    BdrvChild *parent_child;
    BlockDriverState *filter_or_cow_bs;
    BlockStats *s = NULL;

    s = g_malloc0(sizeof(*s));
    s->stats = g_malloc0(sizeof(*s->stats));

    if (!bs) {
        return s;
    }

    /* Skip automatically inserted nodes that the user isn't aware of in
     * a BlockBackend-level command. Stay at the exact node for a node-level
     * command. */
    if (blk_level) {
        bs = bdrv_skip_implicit_filters(bs);
    }

    if (bdrv_get_node_name(bs)[0]) {
        s->node_name = g_strdup(bdrv_get_node_name(bs));
    }

    s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset);

    s->driver_specific = bdrv_get_specific_stats(bs);

    parent_child = bdrv_primary_child(bs);
    if (!parent_child ||
        !(parent_child->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED)))
    {
        BdrvChild *c;

        /*
         * Look for a unique data-storing child.  We do not need to look for
         * filtered children, as there would be only one and it would have been
         * the primary child.
         */
        parent_child = NULL;
        QLIST_FOREACH(c, &bs->children, next) {
            if (c->role & BDRV_CHILD_DATA) {
                if (parent_child) {
                    /*
                     * There are multiple data-storing children and we cannot
                     * choose between them.
                     */
                    parent_child = NULL;
                    break;
                }
                parent_child = c;
            }
        }
    }
    if (parent_child) {
        s->parent = bdrv_query_bds_stats(parent_child->bs, blk_level);
    }

    filter_or_cow_bs = bdrv_filter_or_cow_bs(bs);
    if (blk_level && filter_or_cow_bs) {
        /*
         * Put any filtered or COW child here (for backwards
         * compatibility to when we put bs0->backing here, which might
         * be either)
         */
        s->backing = bdrv_query_bds_stats(filter_or_cow_bs, blk_level);
    }

    return s;
}
BlockInfoList *qmp_query_block(Error **errp)
{
    BlockInfoList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    Error *local_err = NULL;

    for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
        BlockInfoList *info;

        if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
            continue;
        }

        info = g_malloc0(sizeof(*info));
        bdrv_query_info(blk, &info->value, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            g_free(info);
            qapi_free_BlockInfoList(head);
            return NULL;
        }

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}
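
/*
 * QMP usage sketch (illustrative, sample values made up): this handler
 * backs the query-block command; the reply entries follow the BlockInfo
 * QAPI type populated by bdrv_query_info() above.
 *
 *     -> { "execute": "query-block" }
 *     <- { "return": [ { "device": "drive0", "locked": false,
 *                        "removable": false, ... } ] }
 */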
BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                                     bool query_nodes,
                                     Error **errp)
{
    BlockStatsList *head = NULL, **tail = &head;
    BlockBackend *blk;
    BlockDriverState *bs;

    /* Just to be safe if query_nodes is not always initialized */
    if (has_query_nodes && query_nodes) {
        for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) {
            AioContext *ctx = bdrv_get_aio_context(bs);

            aio_context_acquire(ctx);
            QAPI_LIST_APPEND(tail, bdrv_query_bds_stats(bs, false));
            aio_context_release(ctx);
        }
    } else {
        for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
            AioContext *ctx = blk_get_aio_context(blk);
            BlockStats *s;
            char *qdev;

            if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
                continue;
            }

            aio_context_acquire(ctx);
            s = bdrv_query_bds_stats(blk_bs(blk), true);
            s->device = g_strdup(blk_name(blk));

            qdev = blk_get_attached_dev_id(blk);
            if (qdev && *qdev) {
                s->qdev = qdev;
            } else {
                g_free(qdev);
            }

            bdrv_query_blk_stats(s->stats, blk);
            aio_context_release(ctx);

            QAPI_LIST_APPEND(tail, s);
        }
    }

    return head;
}
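
/*
 * QMP usage sketch (illustrative, sample values made up): this handler
 * backs query-blockstats.  With "query-nodes": true it walks named nodes
 * via bdrv_next_node(); otherwise it walks BlockBackends.
 *
 *     -> { "execute": "query-blockstats",
 *          "arguments": { "query-nodes": true } }
 *     <- { "return": [ { "node-name": "node0", "stats": { ... } } ] }
 */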
void bdrv_snapshot_dump(QEMUSnapshotInfo *sn)
{
    char clock_buf[128];
    char icount_buf[128] = {0};
    int64_t secs;
    char *sizing = NULL;

    if (!sn) {
        qemu_printf("%-10s%-17s%8s%20s%13s%11s",
                    "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK", "ICOUNT");
    } else {
        g_autoptr(GDateTime) date = g_date_time_new_from_unix_local(sn->date_sec);
        g_autofree char *date_buf = g_date_time_format(date, "%Y-%m-%d %H:%M:%S");

        secs = sn->vm_clock_nsec / 1000000000;
        snprintf(clock_buf, sizeof(clock_buf),
                 "%02d:%02d:%02d.%03d",
                 (int)(secs / 3600),
                 (int)((secs / 60) % 60),
                 (int)(secs % 60),
                 (int)((sn->vm_clock_nsec / 1000000) % 1000));
        sizing = size_to_str(sn->vm_state_size);
        if (sn->icount != -1ULL) {
            snprintf(icount_buf, sizeof(icount_buf),
                "%"PRId64, sn->icount);
        }
        qemu_printf("%-9s %-16s %8s%20s%13s%11s",
                    sn->id_str, sn->name,
                    sizing,
                    date_buf,
                    clock_buf,
                    icount_buf);
    }
    g_free(sizing);
}
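
/*
 * Output sketch (illustrative, values made up): called once with sn == NULL
 * for the header row and then once per snapshot, this produces the table
 * printed e.g. by "qemu-img snapshot -l", roughly:
 *
 *     ID        TAG               VM SIZE                DATE     VM CLOCK     ICOUNT
 *     1         pre-upgrade       41.2 MiB 2023-01-01 12:00:00 00:00:05.123       1234
 */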
static void dump_qdict(int indentation, QDict *dict);
static void dump_qlist(int indentation, QList *list);

static void dump_qobject(int comp_indent, QObject *obj)
{
    switch (qobject_type(obj)) {
    case QTYPE_QNUM: {
        QNum *value = qobject_to(QNum, obj);
        char *tmp = qnum_to_string(value);
        qemu_printf("%s", tmp);
        g_free(tmp);
        break;
    }
    case QTYPE_QSTRING: {
        QString *value = qobject_to(QString, obj);
        qemu_printf("%s", qstring_get_str(value));
        break;
    }
    case QTYPE_QDICT: {
        QDict *value = qobject_to(QDict, obj);
        dump_qdict(comp_indent, value);
        break;
    }
    case QTYPE_QLIST: {
        QList *value = qobject_to(QList, obj);
        dump_qlist(comp_indent, value);
        break;
    }
    case QTYPE_QBOOL: {
        QBool *value = qobject_to(QBool, obj);
        qemu_printf("%s", qbool_get_bool(value) ? "true" : "false");
        break;
    }
    default:
        abort();
    }
}
static void dump_qlist(int indentation, QList *list)
{
    const QListEntry *entry;
    int i = 0;

    for (entry = qlist_first(list); entry; entry = qlist_next(entry), i++) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        qemu_printf("%*s[%i]:%c", indentation * 4, "", i,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
    }
}
static void dump_qdict(int indentation, QDict *dict)
{
    const QDictEntry *entry;

    for (entry = qdict_first(dict); entry; entry = qdict_next(dict, entry)) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        char *key = g_malloc(strlen(entry->key) + 1);
        int i;

        /* replace dashes with spaces in key (variable) names */
        for (i = 0; entry->key[i]; i++) {
            key[i] = entry->key[i] == '-' ? ' ' : entry->key[i];
        }
        key[i] = 0;
        qemu_printf("%*s%s:%c", indentation * 4, "", key,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
        g_free(key);
    }
}
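
/*
 * Formatting sketch (illustrative): given a QDict such as
 * { "refcount-bits": 16, "corrupt": false }, the helpers above print, at
 * indentation level 1 (four spaces per level):
 *
 *     refcount bits: 16
 *     corrupt: false
 *
 * Dashes in key names become spaces, and composite values (dicts, lists)
 * recurse with one extra indentation level.
 */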
void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec)
{
    QObject *obj, *data;
    Visitor *v = qobject_output_visitor_new(&obj);

    visit_type_ImageInfoSpecific(v, NULL, &info_spec, &error_abort);
    visit_complete(v, &obj);
    data = qdict_get(qobject_to(QDict, obj), "data");
    dump_qobject(1, data);
    qobject_unref(obj);
    visit_free(v);
}
void bdrv_image_info_dump(ImageInfo *info)
{
    char *size_buf, *dsize_buf;
    if (!info->has_actual_size) {
        dsize_buf = g_strdup("unavailable");
    } else {
        dsize_buf = size_to_str(info->actual_size);
    }
    size_buf = size_to_str(info->virtual_size);
    qemu_printf("image: %s\n"
                "file format: %s\n"
                "virtual size: %s (%" PRId64 " bytes)\n"
                "disk size: %s\n",
                info->filename, info->format, size_buf,
                info->virtual_size,
                dsize_buf);
    g_free(size_buf);
    g_free(dsize_buf);

    if (info->has_encrypted && info->encrypted) {
        qemu_printf("encrypted: yes\n");
    }

    if (info->has_cluster_size) {
        qemu_printf("cluster_size: %" PRId64 "\n",
                    info->cluster_size);
    }

    if (info->has_dirty_flag && info->dirty_flag) {
        qemu_printf("cleanly shut down: no\n");
    }

    if (info->backing_filename) {
        qemu_printf("backing file: %s", info->backing_filename);
        if (!info->full_backing_filename) {
            qemu_printf(" (cannot determine actual path)");
        } else if (strcmp(info->backing_filename,
                          info->full_backing_filename) != 0) {
            qemu_printf(" (actual path: %s)", info->full_backing_filename);
        }
        qemu_printf("\n");
        if (info->backing_filename_format) {
            qemu_printf("backing file format: %s\n",
                        info->backing_filename_format);
        }
    }

    if (info->has_snapshots) {
        SnapshotInfoList *elem;

        qemu_printf("Snapshot list:\n");
        bdrv_snapshot_dump(NULL);
        qemu_printf("\n");

        /* Ideally bdrv_snapshot_dump() would operate on SnapshotInfoList but
         * we convert to the block layer's native QEMUSnapshotInfo for now.
         */
        for (elem = info->snapshots; elem; elem = elem->next) {
            QEMUSnapshotInfo sn = {
                .vm_state_size = elem->value->vm_state_size,
                .date_sec = elem->value->date_sec,
                .date_nsec = elem->value->date_nsec,
                .vm_clock_nsec = elem->value->vm_clock_sec * 1000000000ULL +
                                 elem->value->vm_clock_nsec,
                .icount = elem->value->has_icount ?
                          elem->value->icount : -1ULL,
            };

            pstrcpy(sn.id_str, sizeof(sn.id_str), elem->value->id);
            pstrcpy(sn.name, sizeof(sn.name), elem->value->name);
            bdrv_snapshot_dump(&sn);
            qemu_printf("\n");
        }
    }

    if (info->format_specific) {
        qemu_printf("Format specific information:\n");
        bdrv_image_info_specific_dump(info->format_specific);
    }
}