/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the code
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"
#include "exec/cpu-common.h"

#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "plugin.h"
#include "qemu/compiler.h"

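/*
 * A single registered callback: the owning plugin context, the typed
 * function pointer, opaque user data, and linkage into the per-event
 * RCU callback list.
 */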
struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;

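/*
 * Resolve a plugin id to its context; plugin.lock must be held by the
 * caller. An id that was never registered is a fatal error.
 */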
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}

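/*
 * Copy the event mask into the vCPU and flush its jump cache so that
 * stale translations are not re-entered with an outdated mask.
 */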
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}

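/*
 * GHashTable iterator: kick each vCPU to pick up the new event mask.
 * Once a vCPU is up and running the update must happen asynchronously
 * in the vCPU's own context; before that we can apply it directly.
 */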
static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (DEVICE(cpu)->realized) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

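/*
 * Drop a context's callback for one event. When the last callback for
 * an event goes away, the event bit is cleared and every vCPU mask is
 * refreshed so the event no longer fires.
 */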
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (!cb) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

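/*
 * Install (or replace) a callback for an event. Passing a NULL func
 * un-registers the existing callback. The first registration for an
 * event sets the event bit and propagates it to every vCPU.
 */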
static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            /* replace */
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            /* allocate and rcu_insert */
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}

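/* Convenience wrappers around do_plugin_register_cb() */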
void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

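/*
 * The init/exit hooks keep plugin.cpu_ht (the set of live vCPU
 * indexes) in sync and fire the VCPU_INIT/VCPU_EXIT events. Note that
 * the exit event runs before the vCPU is removed from the hash table.
 */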
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

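/*
 * Register an inline operation on *ptr (e.g. add an immediate to a
 * 64-bit counter) instead of a full callback into the plugin; see
 * exec_inline_op() below for the supported ops.
 */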
void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}

void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}

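/*
 * Memory callbacks additionally record which accesses (reads, writes
 * or both) they are interested in; the rw filter is applied in
 * qemu_plugin_vcpu_mem_cb() below.
 */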
void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

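/*
 * On a TB flush all dynamic callback arrays are stale: free them all
 * and notify plugins through the FLUSH event.
 */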
void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

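/*
 * Apply an inline operation to the plugin-supplied pointer, e.g. a
 * plugin that registered QEMU_PLUGIN_INLINE_ADD_U64 with imm == 1
 * maintains a plain 64-bit event counter.
 */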
void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}

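/*
 * Dispatch one memory access to the callbacks attached to the current
 * instruction, honouring each callback's read/write filter.
 */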
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        if (!(rw & cb->rw)) {
            break;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
                           vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */

void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}

/*
 * Helpers for *-user to ensure locks are sane across fork() events.
 */

void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}

void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        /* should we just reset via plugin_init? */
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}

/*
 * Call this function after longjmp'ing to the main loop. It's possible that the
 * last instruction of a TB might have used helpers, and therefore the
 * "disable" instruction will never execute because it ended up as dead code.
 */
void qemu_plugin_disable_mem_helpers(CPUState *cpu)
{
    cpu->plugin_mem_cbs = NULL;
}

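/*
 * qht comparison for dyn_cb_arr_ht: callback arrays are tracked by
 * pointer identity, so a plain pointer compare suffices.
 */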
static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

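/*
 * Runs at program startup via the constructor attribute, so all plugin
 * state is initialised before main() and before any plugin can be
 * installed.
 */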
static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}