/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the code
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/option.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"
#include "exec/cpu-common.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/sysemu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem-internal.h" /* mem_info macros */
#include "plugin.h"

struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;

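/*
 * Note on the lookup below: plugin.id_ht does not store the context pointer
 * itself. It maps a plugin id to the address of the 'id' field embedded in
 * that plugin's qemu_plugin_ctx, and container_of() recovers the enclosing
 * context from that address.
 */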
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}

static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    cpu_tb_jmp_cache_clear(cpu);
}

static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (cpu->created) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

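/*
 * The dispatchers below all follow the same pattern: walk the per-event
 * callback list with QLIST_FOREACH_SAFE_RCU, which tolerates an entry
 * removing itself (e.g. a plugin uninstalling) mid-iteration, and invoke
 * the member of the 'f' union that matches the event's signature.
 */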
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    qemu_rec_mutex_lock(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        goto out_unlock;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            /* replace the existing callback and its user data */
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
 out_unlock:
    qemu_rec_mutex_unlock(&plugin.lock);
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

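/*
 * A minimal usage sketch from the plugin side (hypothetical plugin code,
 * not part of this file), assuming the public qemu-plugin.h API:
 *
 *     static void my_idle_cb(qemu_plugin_id_t id, unsigned int cpu_index)
 *     {
 *         // e.g. count idle transitions per vCPU
 *     }
 *
 *     QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
 *                                                const qemu_info_t *info,
 *                                                int argc, char **argv)
 *     {
 *         qemu_plugin_register_vcpu_idle_cb(id, my_idle_cb);
 *         return 0;
 *     }
 *
 * Registration funnels into do_plugin_register_cb() above; passing a NULL
 * function pointer unregisters the callback for that event.
 */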
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}

static inline uint32_t cb_to_tcg_flags(enum qemu_plugin_cb_flags flags)
{
    uint32_t ret;

    switch (flags) {
    case QEMU_PLUGIN_CB_RW_REGS:
        ret = 0;
        break;
    case QEMU_PLUGIN_CB_R_REGS:
        ret = TCG_CALL_NO_WG;
        break;
    case QEMU_PLUGIN_CB_NO_REGS:
    default:
        ret = TCG_CALL_NO_RWG;
    }
    return ret;
}

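/*
 * The mapping above tells TCG how much global (guest register) state a
 * plugin callback may touch: a callback that promises not to write
 * registers gets TCG_CALL_NO_WG ("no write globals"), and one that touches
 * no registers at all gets TCG_CALL_NO_RWG, giving the optimizer more
 * freedom around the emitted helper call.
 */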
void
plugin_register_dyn_cb__udata(GArray **arr,
                              qemu_plugin_vcpu_udata_cb_t cb,
                              enum qemu_plugin_cb_flags flags, void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}

void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}

void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}

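/*
 * Illustrative pairing with plugin_register_inline_op() above (a sketch
 * under assumed usage, with a plugin-side uint64_t counter as 'ptr'):
 *
 *     static uint64_t insn_count;
 *     ...
 *     plugin_register_inline_op(&arr, 0, QEMU_PLUGIN_INLINE_ADD_U64,
 *                               &insn_count, 1);
 *
 * i.e. each execution adds 'imm' (here 1) to the u64 at 'ptr', which is
 * exactly what exec_inline_op() does on this slow path.
 */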
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);
        int w = !!(info & TRACE_MEM_ST) + 1;

        /* skip callbacks not subscribed to this access type */
        if (!(w & cb->rw)) {
            continue;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

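/*
 * The access-type check above derives 'w' from the trace-generated 'info'
 * word: !!(info & TRACE_MEM_ST) + 1 yields 1 (QEMU_PLUGIN_MEM_R) for a
 * load and 2 (QEMU_PLUGIN_MEM_W) for a store, which is then matched
 * against the QEMU_PLUGIN_MEM_RW bits the callback subscribed to.
 */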
void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Call this function after longjmp'ing to the main loop. It's possible that the
 * last instruction of a TB might have used helpers, and therefore the
 * "disable" instruction will never execute because it ended up as dead code.
 */
void qemu_plugin_disable_mem_helpers(CPUState *cpu)
{
    cpu->plugin_mem_cbs = NULL;
}

static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    /* compare by pointer identity; each array is its own key */
    return ap == bp;
}

static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}