/*
 * Block layer qmp and info dump related functions
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/qapi.h"
#include "block/block_int.h"
#include "block/throttle-groups.h"
#include "block/write-threshold.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-block-core.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qmp/qstring.h"
#include "qemu/qemu-print.h"
#include "sysemu/block-backend.h"
#include "qemu/cutils.h"

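/*
 * Build a BlockDeviceInfo for @bs, including the throttling configuration
 * (when a BlockBackend with an active throttle group is given) and the chain
 * of ImageInfo structs for the backing file chain.  Returns NULL and sets
 * @errp on failure.
 */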
BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
                                        BlockDriverState *bs, Error **errp)
{
    ImageInfo **p_image_info;
    BlockDriverState *bs0;
    BlockDeviceInfo *info;

    if (!bs->drv) {
        error_setg(errp, "Block device %s is ejected", bs->node_name);
        return NULL;
    }

    bdrv_refresh_filename(bs);

    info = g_malloc0(sizeof(*info));
    info->file = g_strdup(bs->filename);
    info->ro = bs->read_only;
    info->drv = g_strdup(bs->drv->format_name);
    info->encrypted = bs->encrypted;
    info->encryption_key_missing = false;

    info->cache = g_new(BlockdevCacheInfo, 1);
    *info->cache = (BlockdevCacheInfo) {
        .writeback = blk ? blk_enable_write_cache(blk) : true,
        .direct = !!(bs->open_flags & BDRV_O_NOCACHE),
        .no_flush = !!(bs->open_flags & BDRV_O_NO_FLUSH),
    };

    if (bs->node_name[0]) {
        info->has_node_name = true;
        info->node_name = g_strdup(bs->node_name);
    }

    if (bs->backing_file[0]) {
        info->has_backing_file = true;
        info->backing_file = g_strdup(bs->backing_file);
    }

    info->detect_zeroes = bs->detect_zeroes;

    if (blk && blk_get_public(blk)->throttle_group_member.throttle_state) {
        ThrottleConfig cfg;
        BlockBackendPublic *blkp = blk_get_public(blk);

        throttle_group_get_config(&blkp->throttle_group_member, &cfg);

        info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
        info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
        info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg;

        info->iops = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
        info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
        info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;

        info->has_bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
        info->has_bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
        info->has_bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
        info->bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;

        info->has_iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
        info->has_iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
        info->has_iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
        info->iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;

        info->has_bps_max_length = info->has_bps_max;
        info->bps_max_length =
            cfg.buckets[THROTTLE_BPS_TOTAL].burst_length;
        info->has_bps_rd_max_length = info->has_bps_rd_max;
        info->bps_rd_max_length =
            cfg.buckets[THROTTLE_BPS_READ].burst_length;
        info->has_bps_wr_max_length = info->has_bps_wr_max;
        info->bps_wr_max_length =
            cfg.buckets[THROTTLE_BPS_WRITE].burst_length;

        info->has_iops_max_length = info->has_iops_max;
        info->iops_max_length =
            cfg.buckets[THROTTLE_OPS_TOTAL].burst_length;
        info->has_iops_rd_max_length = info->has_iops_rd_max;
        info->iops_rd_max_length =
            cfg.buckets[THROTTLE_OPS_READ].burst_length;
        info->has_iops_wr_max_length = info->has_iops_wr_max;
        info->iops_wr_max_length =
            cfg.buckets[THROTTLE_OPS_WRITE].burst_length;

        info->has_iops_size = cfg.op_size;
        info->iops_size = cfg.op_size;

        info->has_group = true;
        info->group =
            g_strdup(throttle_group_get_name(&blkp->throttle_group_member));
    }

    info->write_threshold = bdrv_write_threshold_get(bs);

    bs0 = bs;
    p_image_info = &info->image;
    info->backing_file_depth = 0;
    while (1) {
        Error *local_err = NULL;
        bdrv_query_image_info(bs0, p_image_info, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            qapi_free_BlockDeviceInfo(info);
            return NULL;
        }

        if (bs0->drv && bs0->backing) {
            info->backing_file_depth++;
            bs0 = bs0->backing->bs;
            (*p_image_info)->has_backing_image = true;
            p_image_info = &((*p_image_info)->backing_image);
        } else {
            break;
        }

        /* Skip automatically inserted nodes that the user isn't aware of for
         * query-block (blk != NULL), but not for query-named-block-nodes */
        while (blk && bs0->drv && bs0->implicit) {
            bs0 = backing_bs(bs0);
            assert(bs0);
        }
    }

    return info;
}

/*
 * Returns 0 on success, with *p_list either set to describe snapshot
 * information, or NULL because there are no snapshots.  Returns -errno on
 * error, with *p_list untouched.
 */
int bdrv_query_snapshot_info_list(BlockDriverState *bs,
                                  SnapshotInfoList **p_list,
                                  Error **errp)
{
    int i, sn_count;
    QEMUSnapshotInfo *sn_tab = NULL;
    SnapshotInfoList *info_list, *cur_item = NULL, *head = NULL;
    SnapshotInfo *info;

    sn_count = bdrv_snapshot_list(bs, &sn_tab);
    if (sn_count < 0) {
        const char *dev = bdrv_get_device_name(bs);
        switch (sn_count) {
        case -ENOMEDIUM:
            error_setg(errp, "Device '%s' is not inserted", dev);
            break;
        case -ENOTSUP:
            error_setg(errp,
                       "Device '%s' does not support internal snapshots",
                       dev);
            break;
        default:
            error_setg_errno(errp, -sn_count,
                             "Can't list snapshots of device '%s'", dev);
            break;
        }
        return sn_count;
    }

    for (i = 0; i < sn_count; i++) {
        info = g_new0(SnapshotInfo, 1);
        info->id = g_strdup(sn_tab[i].id_str);
        info->name = g_strdup(sn_tab[i].name);
        info->vm_state_size = sn_tab[i].vm_state_size;
        info->date_sec = sn_tab[i].date_sec;
        info->date_nsec = sn_tab[i].date_nsec;
        info->vm_clock_sec = sn_tab[i].vm_clock_nsec / 1000000000;
        info->vm_clock_nsec = sn_tab[i].vm_clock_nsec % 1000000000;

        info_list = g_new0(SnapshotInfoList, 1);
        info_list->value = info;

        /* XXX: waiting for the qapi to support qemu-queue.h types */
        if (!cur_item) {
            head = cur_item = info_list;
        } else {
            cur_item->next = info_list;
            cur_item = info_list;
        }
    }

    g_free(sn_tab);
    *p_list = head;
    return 0;
}

/**
 * bdrv_query_image_info:
 * @bs: block device to examine
 * @p_info: location to store image information
 * @errp: location to store error information
 *
 * Store "flat" image information in @p_info.
 *
 * "Flat" means it does *not* query backing image information,
 * i.e. (*pinfo)->has_backing_image will be set to false and
 * (*pinfo)->backing_image to NULL even when the image does in fact have
 * a backing image.
 *
 * @p_info will be set only on success. On error, store error in @errp.
 */
void bdrv_query_image_info(BlockDriverState *bs,
                           ImageInfo **p_info,
                           Error **errp)
{
    int64_t size;
    const char *backing_filename;
    BlockDriverInfo bdi;
    int ret;
    Error *err = NULL;
    ImageInfo *info;

    aio_context_acquire(bdrv_get_aio_context(bs));

    size = bdrv_getlength(bs);
    if (size < 0) {
        error_setg_errno(errp, -size, "Can't get image size '%s'",
                         bs->exact_filename);
        goto out;
    }

    bdrv_refresh_filename(bs);

    info = g_new0(ImageInfo, 1);
    info->filename = g_strdup(bs->filename);
    info->format = g_strdup(bdrv_get_format_name(bs));
    info->virtual_size = size;
    info->actual_size = bdrv_get_allocated_file_size(bs);
    info->has_actual_size = info->actual_size >= 0;
    if (bdrv_is_encrypted(bs)) {
        info->encrypted = true;
        info->has_encrypted = true;
    }
    if (bdrv_get_info(bs, &bdi) >= 0) {
        if (bdi.cluster_size != 0) {
            info->cluster_size = bdi.cluster_size;
            info->has_cluster_size = true;
        }
        info->dirty_flag = bdi.is_dirty;
        info->has_dirty_flag = true;
    }
    info->format_specific = bdrv_get_specific_info(bs, &err);
    if (err) {
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }
    info->has_format_specific = info->format_specific != NULL;

    backing_filename = bs->backing_file;
    if (backing_filename[0] != '\0') {
        char *backing_filename2;
        info->backing_filename = g_strdup(backing_filename);
        info->has_backing_filename = true;
        backing_filename2 = bdrv_get_full_backing_filename(bs, NULL);

        /* Always report the full_backing_filename if present, even if it's the
         * same as backing_filename. That they are same is useful info. */
        if (backing_filename2) {
            info->full_backing_filename = g_strdup(backing_filename2);
            info->has_full_backing_filename = true;
        }

        if (bs->backing_format[0]) {
            info->backing_filename_format = g_strdup(bs->backing_format);
            info->has_backing_filename_format = true;
        }
        g_free(backing_filename2);
    }

    ret = bdrv_query_snapshot_info_list(bs, &info->snapshots, &err);
    switch (ret) {
    case 0:
        if (info->snapshots) {
            info->has_snapshots = true;
        }
        break;
    /* recoverable error */
    case -ENOMEDIUM:
    case -ENOTSUP:
        error_free(err);
        break;
    default:
        error_propagate(errp, err);
        qapi_free_ImageInfo(info);
        goto out;
    }

    *p_info = info;

out:
    aio_context_release(bdrv_get_aio_context(bs));
}

/* @p_info will be set only on success. */
static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
                            Error **errp)
{
    BlockInfo *info = g_malloc0(sizeof(*info));
    BlockDriverState *bs = blk_bs(blk);
    char *qdev;

    /* Skip automatically inserted nodes that the user isn't aware of */
    while (bs && bs->drv && bs->implicit) {
        bs = backing_bs(bs);
    }

    info->device = g_strdup(blk_name(blk));
    info->type = g_strdup("unknown");
    info->locked = blk_dev_is_medium_locked(blk);
    info->removable = blk_dev_has_removable_media(blk);

    qdev = blk_get_attached_dev_id(blk);
    if (qdev && *qdev) {
        info->has_qdev = true;
        info->qdev = qdev;
    } else {
        g_free(qdev);
    }

    if (blk_dev_has_tray(blk)) {
        info->has_tray_open = true;
        info->tray_open = blk_dev_is_tray_open(blk);
    }

    if (blk_iostatus_is_enabled(blk)) {
        info->has_io_status = true;
        info->io_status = blk_iostatus(blk);
    }

    if (bs && !QLIST_EMPTY(&bs->dirty_bitmaps)) {
        info->has_dirty_bitmaps = true;
        info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
    }

    if (bs && bs->drv) {
        info->has_inserted = true;
        info->inserted = bdrv_block_device_info(blk, bs, errp);
        if (info->inserted == NULL) {
            goto err;
        }
    }

    *p_info = info;
    return;

 err:
    qapi_free_BlockInfo(info);
}

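/* Convert a C array of uint64_t values into a QAPI uint64List. */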
static uint64List *uint64_list(uint64_t *list, int size)
{
    int i;
    uint64List *out_list = NULL;
    uint64List **pout_list = &out_list;

    for (i = 0; i < size; i++) {
        uint64List *entry = g_new(uint64List, 1);
        entry->value = list[i];
        *pout_list = entry;
        pout_list = &entry->next;
    }

    *pout_list = NULL;

    return out_list;
}

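/*
 * Translate a BlockLatencyHistogram into a BlockLatencyHistogramInfo QAPI
 * object.  *not_null is set to false and *info is left untouched when the
 * histogram has no bins.
 */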
static void bdrv_latency_histogram_stats(BlockLatencyHistogram *hist,
                                         bool *not_null,
                                         BlockLatencyHistogramInfo **info)
{
    *not_null = hist->bins != NULL;
    if (*not_null) {
        *info = g_new0(BlockLatencyHistogramInfo, 1);

        (*info)->boundaries = uint64_list(hist->boundaries, hist->nbins - 1);
        (*info)->bins = uint64_list(hist->bins, hist->nbins);
    }
}

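/*
 * Fill in the BlockBackend-level fields of @ds from the accounting state of
 * @blk: byte and operation counters, latency totals, per-interval timed
 * stats, and the optional latency histograms.
 */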
static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
{
    BlockAcctStats *stats = blk_get_stats(blk);
    BlockAcctTimedStats *ts = NULL;

    ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
    ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
    ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
    ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];

    ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
    ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
    ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];

    ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
    ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
    ds->invalid_flush_operations =
        stats->invalid_ops[BLOCK_ACCT_FLUSH];

    ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
    ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
    ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
    ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
    ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
    ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];

    ds->has_idle_time_ns = stats->last_access_time_ns > 0;
    if (ds->has_idle_time_ns) {
        ds->idle_time_ns = block_acct_idle_time_ns(stats);
    }

    ds->account_invalid = stats->account_invalid;
    ds->account_failed = stats->account_failed;

    while ((ts = block_acct_interval_next(stats, ts))) {
        BlockDeviceTimedStatsList *timed_stats =
            g_malloc0(sizeof(*timed_stats));
        BlockDeviceTimedStats *dev_stats = g_malloc0(sizeof(*dev_stats));
        timed_stats->next = ds->timed_stats;
        timed_stats->value = dev_stats;
        ds->timed_stats = timed_stats;

        TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ];
        TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE];
        TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH];

        dev_stats->interval_length = ts->interval_length;

        dev_stats->min_rd_latency_ns = timed_average_min(rd);
        dev_stats->max_rd_latency_ns = timed_average_max(rd);
        dev_stats->avg_rd_latency_ns = timed_average_avg(rd);

        dev_stats->min_wr_latency_ns = timed_average_min(wr);
        dev_stats->max_wr_latency_ns = timed_average_max(wr);
        dev_stats->avg_wr_latency_ns = timed_average_avg(wr);

        dev_stats->min_flush_latency_ns = timed_average_min(fl);
        dev_stats->max_flush_latency_ns = timed_average_max(fl);
        dev_stats->avg_flush_latency_ns = timed_average_avg(fl);

        dev_stats->avg_rd_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_READ);
        dev_stats->avg_wr_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_WRITE);
    }

    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_READ],
                                 &ds->has_rd_latency_histogram,
                                 &ds->rd_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_WRITE],
                                 &ds->has_wr_latency_histogram,
                                 &ds->wr_latency_histogram);
    bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_FLUSH],
                                 &ds->has_flush_latency_histogram,
                                 &ds->flush_latency_histogram);
}

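/*
 * Build the BlockStats tree for @bs, recursing into bs->file and (for
 * BlockBackend-level queries) bs->backing.  A NULL @bs yields an empty
 * BlockStats.
 */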
static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs,
                                        bool blk_level)
{
    BlockStats *s = NULL;

    s = g_malloc0(sizeof(*s));
    s->stats = g_malloc0(sizeof(*s->stats));

    if (!bs) {
        return s;
    }

    /* Skip automatically inserted nodes that the user isn't aware of in
     * a BlockBackend-level command. Stay at the exact node for a node-level
     * command. */
    while (blk_level && bs->drv && bs->implicit) {
        bs = backing_bs(bs);
        assert(bs);
    }

    if (bdrv_get_node_name(bs)[0]) {
        s->has_node_name = true;
        s->node_name = g_strdup(bdrv_get_node_name(bs));
    }

    s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset);

    if (bs->file) {
        s->has_parent = true;
        s->parent = bdrv_query_bds_stats(bs->file->bs, blk_level);
    }

    if (blk_level && bs->backing) {
        s->has_backing = true;
        s->backing = bdrv_query_bds_stats(bs->backing->bs, blk_level);
    }

    return s;
}

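/*
 * QMP handler for query-block: collect a BlockInfo entry for every named or
 * device-attached BlockBackend.
 */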
BlockInfoList *qmp_query_block(Error **errp)
{
    BlockInfoList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    Error *local_err = NULL;

    for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
        BlockInfoList *info;

        if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
            continue;
        }

        info = g_malloc0(sizeof(*info));
        bdrv_query_info(blk, &info->value, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            g_free(info);
            qapi_free_BlockInfoList(head);
            return NULL;
        }

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}

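/*
 * QMP handler for query-blockstats: with query-nodes=true, report stats for
 * every named node; otherwise report per-BlockBackend stats, including the
 * backend-level counters.
 */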
BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                                     bool query_nodes,
                                     Error **errp)
{
    BlockStatsList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    BlockDriverState *bs;

    /* Just to be safe if query_nodes is not always initialized */
    if (has_query_nodes && query_nodes) {
        for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) {
            BlockStatsList *info = g_malloc0(sizeof(*info));
            AioContext *ctx = bdrv_get_aio_context(bs);

            aio_context_acquire(ctx);
            info->value = bdrv_query_bds_stats(bs, false);
            aio_context_release(ctx);

            *p_next = info;
            p_next = &info->next;
        }
    } else {
        for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
            BlockStatsList *info;
            AioContext *ctx = blk_get_aio_context(blk);
            BlockStats *s;
            char *qdev;

            if (!*blk_name(blk) && !blk_get_attached_dev(blk)) {
                continue;
            }

            aio_context_acquire(ctx);
            s = bdrv_query_bds_stats(blk_bs(blk), true);
            s->has_device = true;
            s->device = g_strdup(blk_name(blk));

            qdev = blk_get_attached_dev_id(blk);
            if (qdev && *qdev) {
                s->has_qdev = true;
                s->qdev = qdev;
            } else {
                g_free(qdev);
            }

            bdrv_query_blk_stats(s->stats, blk);
            aio_context_release(ctx);

            info = g_malloc0(sizeof(*info));
            info->value = s;
            *p_next = info;
            p_next = &info->next;
        }
    }

    return head;
}

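/*
 * Print one row of the snapshot table for @sn; with a NULL @sn, print the
 * table header instead.
 */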
void bdrv_snapshot_dump(QEMUSnapshotInfo *sn)
{
    char date_buf[128], clock_buf[128];
    struct tm tm;
    time_t ti;
    int64_t secs;
    char *sizing = NULL;

    if (!sn) {
        qemu_printf("%-10s%-20s%7s%20s%15s",
                    "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
    } else {
        ti = sn->date_sec;
        localtime_r(&ti, &tm);
        strftime(date_buf, sizeof(date_buf),
                 "%Y-%m-%d %H:%M:%S", &tm);
        secs = sn->vm_clock_nsec / 1000000000;
        snprintf(clock_buf, sizeof(clock_buf),
                 "%02d:%02d:%02d.%03d",
                 (int)(secs / 3600),
                 (int)((secs / 60) % 60),
                 (int)(secs % 60),
                 (int)((sn->vm_clock_nsec / 1000000) % 1000));
        sizing = size_to_str(sn->vm_state_size);
        qemu_printf("%-10s%-20s%7s%20s%15s",
                    sn->id_str, sn->name,
                    sizing,
                    date_buf,
                    clock_buf);
    }
    g_free(sizing);
}

static void dump_qdict(int indentation, QDict *dict);
static void dump_qlist(int indentation, QList *list);

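/* Pretty-print a QObject for "Format specific information", recursing into
 * dicts and lists with increasing indentation. */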
static void dump_qobject(int comp_indent, QObject *obj)
{
    switch (qobject_type(obj)) {
    case QTYPE_QNUM: {
        QNum *value = qobject_to(QNum, obj);
        char *tmp = qnum_to_string(value);
        qemu_printf("%s", tmp);
        g_free(tmp);
        break;
    }
    case QTYPE_QSTRING: {
        QString *value = qobject_to(QString, obj);
        qemu_printf("%s", qstring_get_str(value));
        break;
    }
    case QTYPE_QDICT: {
        QDict *value = qobject_to(QDict, obj);
        dump_qdict(comp_indent, value);
        break;
    }
    case QTYPE_QLIST: {
        QList *value = qobject_to(QList, obj);
        dump_qlist(comp_indent, value);
        break;
    }
    case QTYPE_QBOOL: {
        QBool *value = qobject_to(QBool, obj);
        qemu_printf("%s", qbool_get_bool(value) ? "true" : "false");
        break;
    }
    default:
        abort();
    }
}

static void dump_qlist(int indentation, QList *list)
{
    const QListEntry *entry;
    int i = 0;

    for (entry = qlist_first(list); entry; entry = qlist_next(entry), i++) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        qemu_printf("%*s[%i]:%c", indentation * 4, "", i,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
    }
}

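/* Print each dict entry as "key: value", replacing dashes in the key names
 * with spaces for readability. */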
static void dump_qdict(int indentation, QDict *dict)
{
    const QDictEntry *entry;

    for (entry = qdict_first(dict); entry; entry = qdict_next(dict, entry)) {
        QType type = qobject_type(entry->value);
        bool composite = (type == QTYPE_QDICT || type == QTYPE_QLIST);
        char *key = g_malloc(strlen(entry->key) + 1);
        int i;

        /* replace dashes with spaces in key (variable) names */
        for (i = 0; entry->key[i]; i++) {
            key[i] = entry->key[i] == '-' ? ' ' : entry->key[i];
        }
        key[i] = 0;
        qemu_printf("%*s%s:%c", indentation * 4, "", key,
                    composite ? '\n' : ' ');
        dump_qobject(indentation + 1, entry->value);
        if (!composite) {
            qemu_printf("\n");
        }
        g_free(key);
    }
}

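/*
 * Dump the driver-specific part of ImageInfo by converting the QAPI struct
 * to a QObject with the output visitor and pretty-printing its "data" member.
 */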
void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec)
{
    QObject *obj, *data;
    Visitor *v = qobject_output_visitor_new(&obj);

    visit_type_ImageInfoSpecific(v, NULL, &info_spec, &error_abort);
    visit_complete(v, &obj);
    data = qdict_get(qobject_to(QDict, obj), "data");
    dump_qobject(1, data);
    qobject_unref(obj);
    visit_free(v);
}

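/*
 * Human-readable dump of an ImageInfo: image size, backing file, snapshot
 * list, and format specific information.
 */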
void bdrv_image_info_dump(ImageInfo *info)
{
    char *size_buf, *dsize_buf;
    if (!info->has_actual_size) {
        dsize_buf = g_strdup("unavailable");
    } else {
        dsize_buf = size_to_str(info->actual_size);
    }
    size_buf = size_to_str(info->virtual_size);
    qemu_printf("image: %s\n"
                "file format: %s\n"
                "virtual size: %s (%" PRId64 " bytes)\n"
                "disk size: %s\n",
                info->filename, info->format, size_buf,
                info->virtual_size,
                dsize_buf);
    g_free(size_buf);
    g_free(dsize_buf);

    if (info->has_encrypted && info->encrypted) {
        qemu_printf("encrypted: yes\n");
    }

    if (info->has_cluster_size) {
        qemu_printf("cluster_size: %" PRId64 "\n",
                    info->cluster_size);
    }

    if (info->has_dirty_flag && info->dirty_flag) {
        qemu_printf("cleanly shut down: no\n");
    }

    if (info->has_backing_filename) {
        qemu_printf("backing file: %s", info->backing_filename);
        if (!info->has_full_backing_filename) {
            qemu_printf(" (cannot determine actual path)");
        } else if (strcmp(info->backing_filename,
                          info->full_backing_filename) != 0) {
            qemu_printf(" (actual path: %s)", info->full_backing_filename);
        }
        qemu_printf("\n");
        if (info->has_backing_filename_format) {
            qemu_printf("backing file format: %s\n",
                        info->backing_filename_format);
        }
    }

    if (info->has_snapshots) {
        SnapshotInfoList *elem;

        qemu_printf("Snapshot list:\n");
        bdrv_snapshot_dump(NULL);
        qemu_printf("\n");

        /* Ideally bdrv_snapshot_dump() would operate on SnapshotInfoList but
         * we convert to the block layer's native QEMUSnapshotInfo for now.
         */
        for (elem = info->snapshots; elem; elem = elem->next) {
            QEMUSnapshotInfo sn = {
                .vm_state_size = elem->value->vm_state_size,
                .date_sec = elem->value->date_sec,
                .date_nsec = elem->value->date_nsec,
                .vm_clock_nsec = elem->value->vm_clock_sec * 1000000000ULL +
                                 elem->value->vm_clock_nsec,
            };

            pstrcpy(sn.id_str, sizeof(sn.id_str), elem->value->id);
            pstrcpy(sn.name, sizeof(sn.name), elem->value->name);
            bdrv_snapshot_dump(&sn);
            qemu_printf("\n");
        }
    }

    if (info->has_format_specific) {
        qemu_printf("Format specific information:\n");
        bdrv_image_info_specific_dump(info->format_specific);
    }
}