/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the code
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"

#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "plugin.h"
struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;
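
/*
 * Resolve a plugin ID to its context. The caller must hold plugin.lock;
 * an unknown ID can only result from API misuse, so it is fatal.
 */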
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}
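
/*
 * Propagate the global event mask to a vCPU's private copy. For a
 * realized vCPU the update runs as async work so it lands at a safe
 * point; either way the jump cache is flushed so stale translations
 * are not re-entered.
 */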
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_state->event_mask,
                &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}
static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (DEVICE(cpu)->realized) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
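
/*
 * Common registration path: a non-NULL @func installs or replaces the
 * callback record for (@id, @ev); a NULL @func unregisters it. The thin
 * wrappers below, e.g. plugin_register_cb(), just fill in the unused
 * @udata argument.
 */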
static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}
void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}
void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}
CPUPluginState *qemu_plugin_create_vcpu_state(void)
{
    return g_new0(CPUPluginState, 1);
}
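
/*
 * Scoreboards are per-vCPU data arrays. When a vCPU index outgrows the
 * current allocation, double the size until it fits and resize every
 * live scoreboard; translated code may cache scoreboard pointers, so
 * this must happen with all cpus stopped and ends in a TB flush.
 */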
static void plugin_grow_scoreboards__locked(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.scoreboard_alloc_size) {
        return;
    }

    bool need_realloc = FALSE;
    while (cpu->cpu_index >= plugin.scoreboard_alloc_size) {
        plugin.scoreboard_alloc_size *= 2;
        need_realloc = TRUE;
    }

    if (!need_realloc || QLIST_EMPTY(&plugin.scoreboards)) {
        /* nothing to do, we just updated sizes for future scoreboards */
        return;
    }

    /* cpus must be stopped, as tb might still use an existing scoreboard. */
    start_exclusive();
    struct qemu_plugin_scoreboard *score;
    QLIST_FOREACH(score, &plugin.scoreboards, entry) {
        g_array_set_size(score->data, plugin.scoreboard_alloc_size);
    }
    /* force all tb to be flushed, as scoreboard pointers were changed. */
    tb_flush(cpu);
    end_exclusive();
}
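
/* vCPU hotplug hook: track the new cpu and fire the VCPU_INIT event. */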
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin.num_vcpus = MAX(plugin.num_vcpus, cpu->cpu_index + 1);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    plugin_grow_scoreboards__locked(cpu);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}
void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}
struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};
static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}
void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}
/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}
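
/*
 * Record an inline operation on a scoreboard entry; it is executed
 * straight from generated code, without calling back into the plugin.
 */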
void plugin_register_inline_op_on_entry(GArray **arr,
                                        enum qemu_plugin_mem_rw rw,
                                        enum qemu_plugin_op op,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = NULL;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.entry = entry;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}
void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}
void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}
void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    /* idle and resume cb may be called before init, ignore in this case */
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
    }
}
void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
    }
}
void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}
void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}
void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}
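
/*
 * qht_iter_remove() drops an entry whenever the iterator returns true,
 * so this frees every interned callback array in one pass.
 */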
static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}
void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}
void exec_inline_op(struct qemu_plugin_dyn_cb *cb, int cpu_index)
{
    char *ptr = cb->inline_insn.entry.score->data->data;
    size_t elem_size = g_array_get_element_size(
        cb->inline_insn.entry.score->data);
    size_t offset = cb->inline_insn.entry.offset;
    uint64_t *val = (uint64_t *)(ptr + offset + cpu_index * elem_size);

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}
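
/*
 * Called from the memory helpers on every instrumented access: walk the
 * instruction's dynamic callback array and dispatch each matching entry
 * as either a regular callback or an inline op.
 */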
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        if (!(rw & cb->rw)) {
            break;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
                           vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb, cpu->cpu_index);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}
void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}
/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */
void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}
/*
 * Helpers for *-user to ensure locks are sane across fork() events.
 */
void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}
void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        /* should we just reset via plugin_init? */
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}
static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}
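
/*
 * Runs before main() via the constructor attribute, so all state is
 * ready before any plugin can be loaded.
 */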
static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QLIST_INIT(&plugin.scoreboards);
    plugin.scoreboard_alloc_size = 16; /* avoid frequent reallocation */
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}
int plugin_num_vcpus(void)
{
    return plugin.num_vcpus;
}
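
/*
 * Create a scoreboard sized for the current vCPU allocation and link
 * it into plugin.scoreboards so later resizes can find it.
 */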
struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size)
{
    struct qemu_plugin_scoreboard *score =
        g_malloc0(sizeof(struct qemu_plugin_scoreboard));
    score->data = g_array_new(FALSE, TRUE, element_size);
    g_array_set_size(score->data, plugin.scoreboard_alloc_size);

    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_INSERT_HEAD(&plugin.scoreboards, score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    return score;
}
void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_REMOVE(score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    g_array_free(score->data, TRUE);
    g_free(score);
}