/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the code
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"
#include "exec/cpu-common.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/sysemu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem-internal.h" /* mem_info macros */
#include "plugin.h"
#include "qemu/compiler.h"

struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;
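
/*
 * Note on locking: functions whose names end in "__locked" expect
 * plugin.lock (a recursive mutex, initialized in plugin_init() below)
 * to be held by the caller; the public entry points acquire it
 * themselves.
 */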

struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    /*
     * Check the lookup result itself: container_of() on a failed (NULL)
     * lookup would yield a bogus non-NULL pointer.
     */
    if (id_p == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    return ctx;
}
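
/*
 * Update a vCPU's event mask from its own thread via async work. Besides
 * copying in the new mask, we clear the TB jump cache so the vCPU does
 * not keep fast-path lookups into translations made under the old mask.
 */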
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    cpu_tb_jmp_cache_clear(cpu);
}

static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (cpu->created) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}
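
/*
 * Drop @ctx's callback for @ev. If that leaves the global list for the
 * event empty, clear the event's bit in the global mask and propagate
 * the new mask to every vCPU.
 */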
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}
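
/*
 * Example (sketch): from a plugin's point of view, registration arrives
 * here through the public qemu-plugin.h API. Assuming a hypothetical
 * plugin callback my_idle_cb:
 *
 *     QEMU_PLUGIN_EXPORT
 *     int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
 *                             int argc, char **argv)
 *     {
 *         qemu_plugin_register_vcpu_idle_cb(id, my_idle_cb);
 *         return 0;
 *     }
 *
 * Passing a NULL callback for an event takes the
 * plugin_unregister_cb__locked() path above instead.
 */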

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}
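
/*
 * Example (sketch): inline ops back the public inline API, e.g. bumping
 * a 64-bit counter on every TB execution without calling out of the
 * generated code (my_count is a hypothetical plugin-side variable):
 *
 *     static uint64_t my_count;
 *     ...
 *     qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
 *                                              &my_count, 1);
 */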

/*
 * Map the plugin's declared register access onto TCG helper flags:
 * a callback that never reads or writes guest registers lets TCG treat
 * the helper call as not touching globals, which allows better
 * optimization of the surrounding generated code.
 */
static inline uint32_t cb_to_tcg_flags(enum qemu_plugin_cb_flags flags)
{
    uint32_t ret;

    switch (flags) {
    case QEMU_PLUGIN_CB_RW_REGS:
        ret = 0;
        break;
    case QEMU_PLUGIN_CB_R_REGS:
        ret = TCG_CALL_NO_WG;
        break;
    case QEMU_PLUGIN_CB_NO_REGS:
    default:
        ret = TCG_CALL_NO_RWG;
    }
    return ret;
}

inline void
plugin_register_dyn_cb__udata(GArray **arr,
                              qemu_plugin_vcpu_udata_cb_t cb,
                              enum qemu_plugin_cb_flags flags, void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}
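
/*
 * Example (sketch): this is the path taken by per-TB or per-insn
 * execution callbacks registered at translation time (my_insn_exec is
 * hypothetical):
 *
 *     qemu_plugin_register_vcpu_insn_exec_cb(insn, my_insn_exec,
 *                                            QEMU_PLUGIN_CB_NO_REGS, NULL);
 */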

void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}
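
/*
 * Example (sketch): memory callbacks are likewise registered per insn at
 * translation time; @rw filters which accesses fire the callback
 * (my_mem_cb is hypothetical):
 *
 *     qemu_plugin_register_vcpu_mem_cb(insn, my_mem_cb,
 *                                      QEMU_PLUGIN_CB_NO_REGS,
 *                                      QEMU_PLUGIN_MEM_RW, NULL);
 */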

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}
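
/*
 * Called when the TB cache is flushed: no generated code references the
 * dynamic callback arrays any more, so they can all be freed; plugins
 * that registered a flush callback are then notified.
 */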
void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}
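
/*
 * Dispatch memory callbacks for a completed access. The computed w is 1
 * for a load and 2 for a store, matching the bit values of
 * QEMU_PLUGIN_MEM_R and QEMU_PLUGIN_MEM_W, so it can be tested directly
 * against cb->rw.
 */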
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);
        int w = !!(info & TRACE_MEM_ST) + 1;

        if (!(w & cb->rw)) {
            /* skip this callback, but keep scanning the rest */
            continue;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

/*
 * Call this function after longjmp'ing to the main loop. It's possible that the
 * last instruction of a TB might have used helpers, and therefore the
 * "disable" instruction will never execute because it ended up as dead code.
 */
void qemu_plugin_disable_mem_helpers(CPUState *cpu)
{
    cpu->plugin_mem_cbs = NULL;
}

static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}
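
/*
 * Example (sketch): with the core initialized by the constructor above,
 * a plugin built against qemu-plugin.h is loaded on the command line,
 * e.g.:
 *
 *     qemu-system-x86_64 -plugin file=./libmyplugin.so ...
 *
 * libmyplugin.so is a hypothetical plugin binary; see docs/devel/
 * tcg-plugins.rst for the exact option syntax of your QEMU version.
 */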