/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the code
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
14 #include "qemu/osdep.h"
15 #include "qemu/error-report.h"
16 #include "qemu/config-file.h"
17 #include "qapi/error.h"
18 #include "qemu/lockable.h"
19 #include "qemu/option.h"
20 #include "qemu/rcu_queue.h"
21 #include "qemu/xxhash.h"
23 #include "hw/core/cpu.h"
25 #include "exec/exec-all.h"
26 #include "exec/tb-flush.h"
28 #include "tcg/tcg-op.h"
31 struct qemu_plugin_cb
{
32 struct qemu_plugin_ctx
*ctx
;
33 union qemu_plugin_cb_sig f
;
35 QLIST_ENTRY(qemu_plugin_cb
) entry
;
38 struct qemu_plugin_state plugin
;
40 struct qemu_plugin_ctx
*plugin_id_to_ctx_locked(qemu_plugin_id_t id
)
42 struct qemu_plugin_ctx
*ctx
;
43 qemu_plugin_id_t
*id_p
;
45 id_p
= g_hash_table_lookup(plugin
.id_ht
, &id
);
46 ctx
= container_of(id_p
, struct qemu_plugin_ctx
, id
);
48 error_report("plugin: invalid plugin id %" PRIu64
, id
);
54 static void plugin_cpu_update__async(CPUState
*cpu
, run_on_cpu_data data
)
56 bitmap_copy(cpu
->plugin_mask
, &data
.host_ulong
, QEMU_PLUGIN_EV_MAX
);
57 tcg_flush_jmp_cache(cpu
);
60 static void plugin_cpu_update__locked(gpointer k
, gpointer v
, gpointer udata
)
62 CPUState
*cpu
= container_of(k
, CPUState
, cpu_index
);
63 run_on_cpu_data mask
= RUN_ON_CPU_HOST_ULONG(*plugin
.mask
);
65 if (DEVICE(cpu
)->realized
) {
66 async_run_on_cpu(cpu
, plugin_cpu_update__async
, mask
);
68 plugin_cpu_update__async(cpu
, mask
);
72 void plugin_unregister_cb__locked(struct qemu_plugin_ctx
*ctx
,
73 enum qemu_plugin_event ev
)
75 struct qemu_plugin_cb
*cb
= ctx
->callbacks
[ev
];
80 QLIST_REMOVE_RCU(cb
, entry
);
82 ctx
->callbacks
[ev
] = NULL
;
83 if (QLIST_EMPTY_RCU(&plugin
.cb_lists
[ev
])) {
84 clear_bit(ev
, plugin
.mask
);
85 g_hash_table_foreach(plugin
.cpu_ht
, plugin_cpu_update__locked
, NULL
);
91 * The callback function has been loaded from an external library so we do not
92 * have type information
95 static void plugin_vcpu_cb__simple(CPUState
*cpu
, enum qemu_plugin_event ev
)
97 struct qemu_plugin_cb
*cb
, *next
;
100 case QEMU_PLUGIN_EV_VCPU_INIT
:
101 case QEMU_PLUGIN_EV_VCPU_EXIT
:
102 case QEMU_PLUGIN_EV_VCPU_IDLE
:
103 case QEMU_PLUGIN_EV_VCPU_RESUME
:
104 /* iterate safely; plugins might uninstall themselves at any time */
105 QLIST_FOREACH_SAFE_RCU(cb
, &plugin
.cb_lists
[ev
], entry
, next
) {
106 qemu_plugin_vcpu_simple_cb_t func
= cb
->f
.vcpu_simple
;
108 func(cb
->ctx
->id
, cpu
->cpu_index
);
112 g_assert_not_reached();
117 * Disable CFI checks.
118 * The callback function has been loaded from an external library so we do not
119 * have type information
122 static void plugin_cb__simple(enum qemu_plugin_event ev
)
124 struct qemu_plugin_cb
*cb
, *next
;
127 case QEMU_PLUGIN_EV_FLUSH
:
128 QLIST_FOREACH_SAFE_RCU(cb
, &plugin
.cb_lists
[ev
], entry
, next
) {
129 qemu_plugin_simple_cb_t func
= cb
->f
.simple
;
135 g_assert_not_reached();
140 * Disable CFI checks.
141 * The callback function has been loaded from an external library so we do not
142 * have type information
145 static void plugin_cb__udata(enum qemu_plugin_event ev
)
147 struct qemu_plugin_cb
*cb
, *next
;
150 case QEMU_PLUGIN_EV_ATEXIT
:
151 QLIST_FOREACH_SAFE_RCU(cb
, &plugin
.cb_lists
[ev
], entry
, next
) {
152 qemu_plugin_udata_cb_t func
= cb
->f
.udata
;
154 func(cb
->ctx
->id
, cb
->udata
);
158 g_assert_not_reached();
163 do_plugin_register_cb(qemu_plugin_id_t id
, enum qemu_plugin_event ev
,
164 void *func
, void *udata
)
166 struct qemu_plugin_ctx
*ctx
;
168 QEMU_LOCK_GUARD(&plugin
.lock
);
169 ctx
= plugin_id_to_ctx_locked(id
);
170 /* if the plugin is on its way out, ignore this request */
171 if (unlikely(ctx
->uninstalling
)) {
175 struct qemu_plugin_cb
*cb
= ctx
->callbacks
[ev
];
178 cb
->f
.generic
= func
;
181 cb
= g_new(struct qemu_plugin_cb
, 1);
183 cb
->f
.generic
= func
;
185 ctx
->callbacks
[ev
] = cb
;
186 QLIST_INSERT_HEAD_RCU(&plugin
.cb_lists
[ev
], cb
, entry
);
187 if (!test_bit(ev
, plugin
.mask
)) {
188 set_bit(ev
, plugin
.mask
);
189 g_hash_table_foreach(plugin
.cpu_ht
, plugin_cpu_update__locked
,
194 plugin_unregister_cb__locked(ctx
, ev
);
198 void plugin_register_cb(qemu_plugin_id_t id
, enum qemu_plugin_event ev
,
201 do_plugin_register_cb(id
, ev
, func
, NULL
);
205 plugin_register_cb_udata(qemu_plugin_id_t id
, enum qemu_plugin_event ev
,
206 void *func
, void *udata
)
208 do_plugin_register_cb(id
, ev
, func
, udata
);
211 void qemu_plugin_vcpu_init_hook(CPUState
*cpu
)
215 qemu_rec_mutex_lock(&plugin
.lock
);
216 plugin_cpu_update__locked(&cpu
->cpu_index
, NULL
, NULL
);
217 success
= g_hash_table_insert(plugin
.cpu_ht
, &cpu
->cpu_index
,
220 qemu_rec_mutex_unlock(&plugin
.lock
);
222 plugin_vcpu_cb__simple(cpu
, QEMU_PLUGIN_EV_VCPU_INIT
);
225 void qemu_plugin_vcpu_exit_hook(CPUState
*cpu
)
229 plugin_vcpu_cb__simple(cpu
, QEMU_PLUGIN_EV_VCPU_EXIT
);
231 qemu_rec_mutex_lock(&plugin
.lock
);
232 success
= g_hash_table_remove(plugin
.cpu_ht
, &cpu
->cpu_index
);
234 qemu_rec_mutex_unlock(&plugin
.lock
);
237 struct plugin_for_each_args
{
238 struct qemu_plugin_ctx
*ctx
;
239 qemu_plugin_vcpu_simple_cb_t cb
;
242 static void plugin_vcpu_for_each(gpointer k
, gpointer v
, gpointer udata
)
244 struct plugin_for_each_args
*args
= udata
;
245 int cpu_index
= *(int *)k
;
247 args
->cb(args
->ctx
->id
, cpu_index
);
250 void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id
,
251 qemu_plugin_vcpu_simple_cb_t cb
)
253 struct plugin_for_each_args args
;
258 qemu_rec_mutex_lock(&plugin
.lock
);
259 args
.ctx
= plugin_id_to_ctx_locked(id
);
261 g_hash_table_foreach(plugin
.cpu_ht
, plugin_vcpu_for_each
, &args
);
262 qemu_rec_mutex_unlock(&plugin
.lock
);
265 /* Allocate and return a callback record */
266 static struct qemu_plugin_dyn_cb
*plugin_get_dyn_cb(GArray
**arr
)
271 cbs
= g_array_sized_new(false, false,
272 sizeof(struct qemu_plugin_dyn_cb
), 1);
276 g_array_set_size(cbs
, cbs
->len
+ 1);
277 return &g_array_index(cbs
, struct qemu_plugin_dyn_cb
, cbs
->len
- 1);
280 void plugin_register_inline_op(GArray
**arr
,
281 enum qemu_plugin_mem_rw rw
,
282 enum qemu_plugin_op op
, void *ptr
,
285 struct qemu_plugin_dyn_cb
*dyn_cb
;
287 dyn_cb
= plugin_get_dyn_cb(arr
);
289 dyn_cb
->type
= PLUGIN_CB_INLINE
;
291 dyn_cb
->inline_insn
.op
= op
;
292 dyn_cb
->inline_insn
.imm
= imm
;
295 void plugin_register_dyn_cb__udata(GArray
**arr
,
296 qemu_plugin_vcpu_udata_cb_t cb
,
297 enum qemu_plugin_cb_flags flags
,
300 struct qemu_plugin_dyn_cb
*dyn_cb
= plugin_get_dyn_cb(arr
);
302 dyn_cb
->userp
= udata
;
303 /* Note flags are discarded as unused. */
304 dyn_cb
->f
.vcpu_udata
= cb
;
305 dyn_cb
->type
= PLUGIN_CB_REGULAR
;
308 void plugin_register_vcpu_mem_cb(GArray
**arr
,
310 enum qemu_plugin_cb_flags flags
,
311 enum qemu_plugin_mem_rw rw
,
314 struct qemu_plugin_dyn_cb
*dyn_cb
;
316 dyn_cb
= plugin_get_dyn_cb(arr
);
317 dyn_cb
->userp
= udata
;
318 /* Note flags are discarded as unused. */
319 dyn_cb
->type
= PLUGIN_CB_REGULAR
;
321 dyn_cb
->f
.generic
= cb
;
325 * Disable CFI checks.
326 * The callback function has been loaded from an external library so we do not
327 * have type information
330 void qemu_plugin_tb_trans_cb(CPUState
*cpu
, struct qemu_plugin_tb
*tb
)
332 struct qemu_plugin_cb
*cb
, *next
;
333 enum qemu_plugin_event ev
= QEMU_PLUGIN_EV_VCPU_TB_TRANS
;
335 /* no plugin_mask check here; caller should have checked */
337 QLIST_FOREACH_SAFE_RCU(cb
, &plugin
.cb_lists
[ev
], entry
, next
) {
338 qemu_plugin_vcpu_tb_trans_cb_t func
= cb
->f
.vcpu_tb_trans
;
340 func(cb
->ctx
->id
, tb
);
345 * Disable CFI checks.
346 * The callback function has been loaded from an external library so we do not
347 * have type information
351 qemu_plugin_vcpu_syscall(CPUState
*cpu
, int64_t num
, uint64_t a1
, uint64_t a2
,
352 uint64_t a3
, uint64_t a4
, uint64_t a5
,
353 uint64_t a6
, uint64_t a7
, uint64_t a8
)
355 struct qemu_plugin_cb
*cb
, *next
;
356 enum qemu_plugin_event ev
= QEMU_PLUGIN_EV_VCPU_SYSCALL
;
358 if (!test_bit(ev
, cpu
->plugin_mask
)) {
362 QLIST_FOREACH_SAFE_RCU(cb
, &plugin
.cb_lists
[ev
], entry
, next
) {
363 qemu_plugin_vcpu_syscall_cb_t func
= cb
->f
.vcpu_syscall
;
365 func(cb
->ctx
->id
, cpu
->cpu_index
, num
, a1
, a2
, a3
, a4
, a5
, a6
, a7
, a8
);
370 * Disable CFI checks.
371 * The callback function has been loaded from an external library so we do not
372 * have type information
375 void qemu_plugin_vcpu_syscall_ret(CPUState
*cpu
, int64_t num
, int64_t ret
)
377 struct qemu_plugin_cb
*cb
, *next
;
378 enum qemu_plugin_event ev
= QEMU_PLUGIN_EV_VCPU_SYSCALL_RET
;
380 if (!test_bit(ev
, cpu
->plugin_mask
)) {
384 QLIST_FOREACH_SAFE_RCU(cb
, &plugin
.cb_lists
[ev
], entry
, next
) {
385 qemu_plugin_vcpu_syscall_ret_cb_t func
= cb
->f
.vcpu_syscall_ret
;
387 func(cb
->ctx
->id
, cpu
->cpu_index
, num
, ret
);
391 void qemu_plugin_vcpu_idle_cb(CPUState
*cpu
)
393 plugin_vcpu_cb__simple(cpu
, QEMU_PLUGIN_EV_VCPU_IDLE
);
396 void qemu_plugin_vcpu_resume_cb(CPUState
*cpu
)
398 plugin_vcpu_cb__simple(cpu
, QEMU_PLUGIN_EV_VCPU_RESUME
);
401 void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id
,
402 qemu_plugin_vcpu_simple_cb_t cb
)
404 plugin_register_cb(id
, QEMU_PLUGIN_EV_VCPU_IDLE
, cb
);
407 void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id
,
408 qemu_plugin_vcpu_simple_cb_t cb
)
410 plugin_register_cb(id
, QEMU_PLUGIN_EV_VCPU_RESUME
, cb
);
413 void qemu_plugin_register_flush_cb(qemu_plugin_id_t id
,
414 qemu_plugin_simple_cb_t cb
)
416 plugin_register_cb(id
, QEMU_PLUGIN_EV_FLUSH
, cb
);
419 static bool free_dyn_cb_arr(void *p
, uint32_t h
, void *userp
)
421 g_array_free((GArray
*) p
, true);
425 void qemu_plugin_flush_cb(void)
427 qht_iter_remove(&plugin
.dyn_cb_arr_ht
, free_dyn_cb_arr
, NULL
);
428 qht_reset(&plugin
.dyn_cb_arr_ht
);
430 plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH
);
433 void exec_inline_op(struct qemu_plugin_dyn_cb
*cb
)
435 uint64_t *val
= cb
->userp
;
437 switch (cb
->inline_insn
.op
) {
438 case QEMU_PLUGIN_INLINE_ADD_U64
:
439 *val
+= cb
->inline_insn
.imm
;
442 g_assert_not_reached();
446 void qemu_plugin_vcpu_mem_cb(CPUState
*cpu
, uint64_t vaddr
,
447 MemOpIdx oi
, enum qemu_plugin_mem_rw rw
)
449 GArray
*arr
= cpu
->plugin_mem_cbs
;
455 for (i
= 0; i
< arr
->len
; i
++) {
456 struct qemu_plugin_dyn_cb
*cb
=
457 &g_array_index(arr
, struct qemu_plugin_dyn_cb
, i
);
459 if (!(rw
& cb
->rw
)) {
463 case PLUGIN_CB_REGULAR
:
464 cb
->f
.vcpu_mem(cpu
->cpu_index
, make_plugin_meminfo(oi
, rw
),
467 case PLUGIN_CB_INLINE
:
471 g_assert_not_reached();
476 void qemu_plugin_atexit_cb(void)
478 plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT
);
481 void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id
,
482 qemu_plugin_udata_cb_t cb
,
485 plugin_register_cb_udata(id
, QEMU_PLUGIN_EV_ATEXIT
, cb
, udata
);
489 * Handle exit from linux-user. Unlike the normal atexit() mechanism
490 * we need to handle the clean-up manually as it's possible threads
491 * are still running. We need to remove all callbacks from code
492 * generation, flush the current translations and then we can safely
493 * trigger the exit callbacks.
496 void qemu_plugin_user_exit(void)
498 enum qemu_plugin_event ev
;
502 * Locking order: we must acquire locks in an order that is consistent
503 * with the one in fork_start(). That is:
504 * - start_exclusive(), which acquires qemu_cpu_list_lock,
505 * must be called before acquiring plugin.lock.
506 * - tb_flush(), which acquires mmap_lock(), must be called
507 * while plugin.lock is not held.
511 qemu_rec_mutex_lock(&plugin
.lock
);
512 /* un-register all callbacks except the final AT_EXIT one */
513 for (ev
= 0; ev
< QEMU_PLUGIN_EV_MAX
; ev
++) {
514 if (ev
!= QEMU_PLUGIN_EV_ATEXIT
) {
515 struct qemu_plugin_cb
*cb
, *next
;
517 QLIST_FOREACH_SAFE_RCU(cb
, &plugin
.cb_lists
[ev
], entry
, next
) {
518 plugin_unregister_cb__locked(cb
->ctx
, ev
);
523 qemu_plugin_disable_mem_helpers(cpu
);
525 qemu_rec_mutex_unlock(&plugin
.lock
);
527 tb_flush(current_cpu
);
530 /* now it's safe to handle the exit case */
531 qemu_plugin_atexit_cb();
535 * Helpers for *-user to ensure locks are sane across fork() events.
538 void qemu_plugin_user_prefork_lock(void)
540 qemu_rec_mutex_lock(&plugin
.lock
);
543 void qemu_plugin_user_postfork(bool is_child
)
546 /* should we just reset via plugin_init? */
547 qemu_rec_mutex_init(&plugin
.lock
);
549 qemu_rec_mutex_unlock(&plugin
.lock
);
/*
 * qht comparison function for dyn_cb_arr_ht. NOTE(review): body was not
 * visible in this chunk; entries are GArray pointers hashed by address, so
 * pointer identity is the natural equality — confirm against upstream.
 */
static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}
558 static void __attribute__((__constructor__
)) plugin_init(void)
562 for (i
= 0; i
< QEMU_PLUGIN_EV_MAX
; i
++) {
563 QLIST_INIT(&plugin
.cb_lists
[i
]);
565 qemu_rec_mutex_init(&plugin
.lock
);
566 plugin
.id_ht
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
567 plugin
.cpu_ht
= g_hash_table_new(g_int_hash
, g_int_equal
);
568 QTAILQ_INIT(&plugin
.ctxs
);
569 qht_init(&plugin
.dyn_cb_arr_ht
, plugin_dyn_cb_arr_cmp
, 16,
570 QHT_MODE_AUTO_RESIZE
);
571 atexit(qemu_plugin_atexit_cb
);