/*
 * Copyright (C) 2023, Pierrick Bouvier <pierrick.bouvier@linaro.org>
 *
 * Demonstrates and tests usage of inline ops.
 *
 * License: GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <glib.h>
#include <inttypes.h>
#include <stdio.h>

#include <qemu-plugin.h>
/*
 * Per-vcpu counter pairs living in the scoreboard: for each event kind
 * (tb execution, insn execution, mem access) one counter is bumped from a
 * regular callback and one from an inline ADD_U64 op, so the two code
 * paths can be cross-checked at exit.  qemu_plugin_install resolves a
 * qemu_plugin_u64 view onto every field below, so all six must exist.
 */
typedef struct {
    uint64_t count_tb;
    uint64_t count_tb_inline;
    uint64_t count_insn;
    uint64_t count_insn_inline;
    uint64_t count_mem;
    uint64_t count_mem_inline;
} CPUCount;
/* Scoreboard holding one CPUCount per vcpu. */
static struct qemu_plugin_scoreboard *counts;
/* Views onto the scoreboard fields, one per counter (see CPUCount). */
static qemu_plugin_u64 count_tb;
static qemu_plugin_u64 count_tb_inline;
static qemu_plugin_u64 count_insn;
static qemu_plugin_u64 count_insn_inline;
static qemu_plugin_u64 count_mem;
static qemu_plugin_u64 count_mem_inline;

/*
 * Reference totals, incremented under a mutex from the callbacks; the
 * per-vcpu sums are compared against these at exit.
 */
static uint64_t global_count_tb;
static uint64_t global_count_insn;
static uint64_t global_count_mem;
/* Highest vcpu index observed (updated under tb_lock). */
static unsigned int max_cpu_index;
static GMutex tb_lock;
static GMutex insn_lock;
static GMutex mem_lock;

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
43 static void stats_insn(void)
45 const uint64_t expected
= global_count_insn
;
46 const uint64_t per_vcpu
= qemu_plugin_u64_sum(count_insn
);
47 const uint64_t inl_per_vcpu
=
48 qemu_plugin_u64_sum(count_insn_inline
);
49 printf("insn: %" PRIu64
"\n", expected
);
50 printf("insn: %" PRIu64
" (per vcpu)\n", per_vcpu
);
51 printf("insn: %" PRIu64
" (per vcpu inline)\n", inl_per_vcpu
);
52 g_assert(expected
> 0);
53 g_assert(per_vcpu
== expected
);
54 g_assert(inl_per_vcpu
== expected
);
57 static void stats_tb(void)
59 const uint64_t expected
= global_count_tb
;
60 const uint64_t per_vcpu
= qemu_plugin_u64_sum(count_tb
);
61 const uint64_t inl_per_vcpu
=
62 qemu_plugin_u64_sum(count_tb_inline
);
63 printf("tb: %" PRIu64
"\n", expected
);
64 printf("tb: %" PRIu64
" (per vcpu)\n", per_vcpu
);
65 printf("tb: %" PRIu64
" (per vcpu inline)\n", inl_per_vcpu
);
66 g_assert(expected
> 0);
67 g_assert(per_vcpu
== expected
);
68 g_assert(inl_per_vcpu
== expected
);
71 static void stats_mem(void)
73 const uint64_t expected
= global_count_mem
;
74 const uint64_t per_vcpu
= qemu_plugin_u64_sum(count_mem
);
75 const uint64_t inl_per_vcpu
=
76 qemu_plugin_u64_sum(count_mem_inline
);
77 printf("mem: %" PRIu64
"\n", expected
);
78 printf("mem: %" PRIu64
" (per vcpu)\n", per_vcpu
);
79 printf("mem: %" PRIu64
" (per vcpu inline)\n", inl_per_vcpu
);
80 g_assert(expected
> 0);
81 g_assert(per_vcpu
== expected
);
82 g_assert(inl_per_vcpu
== expected
);
85 static void plugin_exit(qemu_plugin_id_t id
, void *udata
)
87 const unsigned int num_cpus
= qemu_plugin_num_vcpus();
88 g_assert(num_cpus
== max_cpu_index
+ 1);
90 for (int i
= 0; i
< num_cpus
; ++i
) {
91 const uint64_t tb
= qemu_plugin_u64_get(count_tb
, i
);
92 const uint64_t tb_inline
= qemu_plugin_u64_get(count_tb_inline
, i
);
93 const uint64_t insn
= qemu_plugin_u64_get(count_insn
, i
);
94 const uint64_t insn_inline
= qemu_plugin_u64_get(count_insn_inline
, i
);
95 const uint64_t mem
= qemu_plugin_u64_get(count_mem
, i
);
96 const uint64_t mem_inline
= qemu_plugin_u64_get(count_mem_inline
, i
);
97 printf("cpu %d: tb (%" PRIu64
", %" PRIu64
") | "
98 "insn (%" PRIu64
", %" PRIu64
") | "
99 "mem (%" PRIu64
", %" PRIu64
")"
101 i
, tb
, tb_inline
, insn
, insn_inline
, mem
, mem_inline
);
102 g_assert(tb
== tb_inline
);
103 g_assert(insn
== insn_inline
);
104 g_assert(mem
== mem_inline
);
111 qemu_plugin_scoreboard_free(counts
);
114 static void vcpu_tb_exec(unsigned int cpu_index
, void *udata
)
116 qemu_plugin_u64_add(count_tb
, cpu_index
, 1);
117 g_mutex_lock(&tb_lock
);
118 max_cpu_index
= MAX(max_cpu_index
, cpu_index
);
120 g_mutex_unlock(&tb_lock
);
123 static void vcpu_insn_exec(unsigned int cpu_index
, void *udata
)
125 qemu_plugin_u64_add(count_insn
, cpu_index
, 1);
126 g_mutex_lock(&insn_lock
);
128 g_mutex_unlock(&insn_lock
);
131 static void vcpu_mem_access(unsigned int cpu_index
,
132 qemu_plugin_meminfo_t info
,
136 qemu_plugin_u64_add(count_mem
, cpu_index
, 1);
137 g_mutex_lock(&mem_lock
);
139 g_mutex_unlock(&mem_lock
);
142 static void vcpu_tb_trans(qemu_plugin_id_t id
, struct qemu_plugin_tb
*tb
)
144 qemu_plugin_register_vcpu_tb_exec_cb(
145 tb
, vcpu_tb_exec
, QEMU_PLUGIN_CB_NO_REGS
, 0);
146 qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
147 tb
, QEMU_PLUGIN_INLINE_ADD_U64
, count_tb_inline
, 1);
149 for (int idx
= 0; idx
< qemu_plugin_tb_n_insns(tb
); ++idx
) {
150 struct qemu_plugin_insn
*insn
= qemu_plugin_tb_get_insn(tb
, idx
);
151 qemu_plugin_register_vcpu_insn_exec_cb(
152 insn
, vcpu_insn_exec
, QEMU_PLUGIN_CB_NO_REGS
, 0);
153 qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
154 insn
, QEMU_PLUGIN_INLINE_ADD_U64
, count_insn_inline
, 1);
155 qemu_plugin_register_vcpu_mem_cb(insn
, &vcpu_mem_access
,
156 QEMU_PLUGIN_CB_NO_REGS
,
157 QEMU_PLUGIN_MEM_RW
, 0);
158 qemu_plugin_register_vcpu_mem_inline_per_vcpu(
159 insn
, QEMU_PLUGIN_MEM_RW
,
160 QEMU_PLUGIN_INLINE_ADD_U64
,
161 count_mem_inline
, 1);
166 int qemu_plugin_install(qemu_plugin_id_t id
, const qemu_info_t
*info
,
167 int argc
, char **argv
)
169 counts
= qemu_plugin_scoreboard_new(sizeof(CPUCount
));
170 count_tb
= qemu_plugin_scoreboard_u64_in_struct(
171 counts
, CPUCount
, count_tb
);
172 count_insn
= qemu_plugin_scoreboard_u64_in_struct(
173 counts
, CPUCount
, count_insn
);
174 count_mem
= qemu_plugin_scoreboard_u64_in_struct(
175 counts
, CPUCount
, count_mem
);
176 count_tb_inline
= qemu_plugin_scoreboard_u64_in_struct(
177 counts
, CPUCount
, count_tb_inline
);
178 count_insn_inline
= qemu_plugin_scoreboard_u64_in_struct(
179 counts
, CPUCount
, count_insn_inline
);
180 count_mem_inline
= qemu_plugin_scoreboard_u64_in_struct(
181 counts
, CPUCount
, count_mem_inline
);
182 qemu_plugin_register_vcpu_tb_trans_cb(id
, vcpu_tb_trans
);
183 qemu_plugin_register_atexit_cb(id
, plugin_exit
, NULL
);