/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
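/*
 * For orientation, here is a minimal sketch of the plugin side that produces
 * such instrumentation requests. It is an illustrative example using the
 * public plugin API (qemu-plugin.h), not part of this file; the callback
 * names (count_insn, tb_trans) are made up for the example.
 *
 *   #include <qemu-plugin.h>
 *
 *   QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
 *
 *   static void count_insn(unsigned int vcpu_index, void *udata)
 *   {
 *       // runs once per executed instruction
 *   }
 *
 *   static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       for (size_t i = 0; i < qemu_plugin_tb_n_insns(tb); i++) {
 *           struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *
 *           qemu_plugin_register_vcpu_insn_exec_cb(insn, count_insn,
 *                                                  QEMU_PLUGIN_CB_NO_REGS,
 *                                                  NULL);
 *       }
 *   }
 *
 *   QEMU_PLUGIN_EXPORT
 *   int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
 *                           int argc, char **argv)
 *   {
 *       qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans);
 *       return 0;
 *   }
 *
 * Each qemu_plugin_register_vcpu_*_cb() call made from the tb_trans hook
 * becomes one of the instrumentation requests that plugin_gen_inject() later
 * fills into the empty callbacks described above.
 */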
#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif

/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for mem callback that is a write, 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};

/*
 * These helpers are stubs that get dynamically switched out for calls
 * direct to the plugin if they are subscribed to.
 */
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }
static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_i32 meminfo = tcg_const_i32(info);
    TCGv_i64 vaddr64 = tcg_temp_new_i64();
    TCGv_ptr udata = tcg_const_ptr(NULL);

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_extu_tl_i64(vaddr64, vaddr);

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i64(vaddr64);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}

static void gen_empty_udata_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}
/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
static void gen_empty_inline_cb(void)
{
    TCGv_i64 val = tcg_temp_new_i64();
    TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */

    tcg_gen_ld_i64(val, ptr, 0);
    /* pass an immediate != 0 so that it doesn't get optimized away */
    tcg_gen_addi_i64(val, val, 0xdeadface);
    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
}
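/*
 * The empty inline callback above pairs with the plugin-side "inline" API.
 * A hedged sketch of what a plugin would register to use it (the insn_count
 * variable and the tb_trans hook it sits in are assumptions for the example):
 *
 *   static uint64_t insn_count;
 *
 *   // inside a tb_trans callback, for each instruction:
 *   qemu_plugin_register_vcpu_insn_exec_inline(insn,
 *                                              QEMU_PLUGIN_INLINE_ADD_U64,
 *                                              &insn_count, 1);
 *
 * At injection time, copy_const_ptr() repoints the NULL above at the
 * plugin's counter and copy_add_i64() replaces the 0xdeadface immediate
 * with the requested increment, so the counter is bumped without calling
 * out to a helper.
 */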
static void gen_empty_mem_cb(TCGv addr, uint32_t info)
{
    do_gen_mem_cb(addr, info);
}

/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr;

    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}

static inline
void gen_plugin_cb_start(enum plugin_gen_from from,
                         enum plugin_gen_cb type, unsigned wr)
{
    TCGOp *op;

    tcg_gen_plugin_cb_start(from, type, wr);
    op = tcg_last_op();
    QSIMPLEQ_INSERT_TAIL(&tcg_ctx->plugin_ops, op, plugin_link);
}

static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}

static inline void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}

union mem_gen_fn {
    void (*mem_fn)(TCGv, uint32_t);
    void (*inline_fn)(void);
};

static void gen_mem_wrapped(enum plugin_gen_cb type,
                            const union mem_gen_fn *f, TCGv addr,
                            uint32_t info, bool is_mem)
{
    int wr = !!(info & TRACE_MEM_ST);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr);
    if (is_mem) {
        f->mem_fn(addr, info);
    } else {
        f->inline_fn();
    }
    tcg_gen_plugin_cb_end();
}

void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
{
    union mem_gen_fn fn;

    fn.mem_fn = gen_empty_mem_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);

    fn.inline_fn = gen_empty_inline_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
}
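/*
 * The memory events emitted above correspond to per-instruction memory
 * callbacks on the plugin side. An illustrative sketch, assuming a vcpu_mem
 * function defined by the plugin:
 *
 *   static void vcpu_mem(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
 *                        uint64_t vaddr, void *udata)
 *   {
 *       bool is_store = qemu_plugin_mem_is_store(info);
 *       // record the access...
 *   }
 *
 *   // inside a tb_trans callback, for each instruction:
 *   qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
 *                                    QEMU_PLUGIN_CB_NO_REGS,
 *                                    QEMU_PLUGIN_MEM_RW, NULL);
 *
 * The rw argument is what op_rw() below checks against the wr flag stored
 * in the plugin_cb_start op, so a QEMU_PLUGIN_MEM_W-only subscription is
 * only injected on store paths.
 */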
static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}

static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    *begin_op = QTAILQ_NEXT(*begin_op, link);
    tcg_debug_assert(*begin_op);
    op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc);
    memcpy(op->args, (*begin_op)->args, sizeof(op->args));
    return op;
}

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}

static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        /* mov_i32 w/ $0 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* extu_i32_i64 */
        op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
    }
    return op;
}

static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
    }
    return op;
}

static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}

static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TARGET_LONG_BITS == 32) {
        /* extu_i32_i64 */
        op = copy_extu_i32_i64(begin_op, op);
    } else {
        /* mov_i64 */
        op = copy_mov_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}

static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
                        void *func, unsigned tcg_flags, int *cb_idx)
{
    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    op->param1 = (*begin_op)->param1;
    op->param2 = (*begin_op)->param2;
    tcg_debug_assert(op->life == 0);
    if (*cb_idx == -1) {
        int i;

        /*
         * Instead of working out the position of the callback in args[], just
         * look for @empty_func, since it should be a unique pointer.
         */
        for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) {
            if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) {
                *cb_idx = i;
                break;
            }
        }
        tcg_debug_assert(i < MAX_OPC_PARAM_ARGS);
    }
    op->args[*cb_idx] = (uintptr_t)func;
    op->args[*cb_idx + 1] = tcg_flags;

    return op;
}
/*
 * When we append/replace ops here we are sensitive to changing patterns of
 * TCGOps generated by the tcg_gen_FOO calls when we generated the
 * empty callbacks. This will assert very quickly in a debug build as
 * we assert the ops we are replacing are the correct ones.
 */
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* call */
    op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
                   cb->f.vcpu_udata, cb->tcg_flags, cb_idx);

    return op;
}

static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* ld_i64 */
    op = copy_ld_i64(&begin_op, op);

    /* add_i64 */
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);

    /* st_i64 */
    op = copy_st_i64(&begin_op, op);

    return op;
}

static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* extu_tl_i64 */
    op = copy_extu_tl_i64(&begin_op, op);

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
                       cb->f.vcpu_udata, cb->tcg_flags, cb_idx);
    }

    return op;
}
typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}

static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & (w + 1));
}

static inline
void inject_cb_type(const GArray *cbs, TCGOp *begin_op, inject_fn inject,
                    op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}

static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}
/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}

/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of it so that they can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
 * that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs, i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    n_cbs = 0;
    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}

static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    TCGv_ptr ptr;

    if (likely(tcg_ctx->plugin_insn == NULL ||
               !tcg_ctx->plugin_insn->mem_helper)) {
        return;
    }
    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
    tcg_ctx->plugin_insn->mem_helper = false;
}
static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_enable_helper(insn, begin_op);
}

static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_disable_helper(insn, begin_op);
}

static void plugin_inject_cb(const struct qemu_plugin_tb *ptb, TCGOp *begin_op,
                             int insn_idx)
{
    enum plugin_gen_from from = begin_op->args[0];
    enum plugin_gen_cb type = begin_op->args[1];

    switch (from) {
    case PLUGIN_GEN_FROM_TB:
        switch (type) {
        case PLUGIN_GEN_CB_UDATA:
            plugin_gen_tb_udata(ptb, begin_op);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_tb_inline(ptb, begin_op);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_FROM_INSN:
        switch (type) {
        case PLUGIN_GEN_CB_UDATA:
            plugin_gen_insn_udata(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_insn_inline(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_ENABLE_MEM_HELPER:
            plugin_gen_enable_mem_helper(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_FROM_MEM:
        switch (type) {
        case PLUGIN_GEN_CB_MEM:
            plugin_gen_mem_regular(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_mem_inline(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_AFTER_INSN:
        switch (type) {
        case PLUGIN_GEN_DISABLE_MEM_HELPER:
            plugin_gen_disable_mem_helper(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}
static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx;

    pr_ops();
    insn_idx = -1;
    QSIMPLEQ_FOREACH(op, &tcg_ctx->plugin_ops, plugin_link) {
        enum plugin_gen_from from = op->args[0];
        enum plugin_gen_cb type = op->args[1];

        tcg_debug_assert(op->opc == INDEX_op_plugin_cb_start);
        /* ENABLE_MEM_HELPER is the first callback of an instruction */
        if (from == PLUGIN_GEN_FROM_INSN &&
            type == PLUGIN_GEN_ENABLE_MEM_HELPER) {
            insn_idx++;
        }
        plugin_inject_cb(plugin_tb, op, insn_idx);
    }
    pr_ops();
}

bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
        ret = true;

        QSIMPLEQ_INIT(&tcg_ctx->plugin_ops);
        ptb->vaddr = tb->pc;
        ptb->vaddr2 = -1;
        get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1);
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }
    return ret;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb);
    tcg_ctx->plugin_insn = pinsn;
    pinsn->vaddr = db->pc_next;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) &&
        unlikely((db->pc_next & TARGET_PAGE_MASK) !=
                 (db->pc_first & TARGET_PAGE_MASK))) {
        get_page_addr_code_hostp(cpu->env_ptr, db->pc_next,
                                 &ptb->haddr2);
        ptb->vaddr2 = db->pc_next;
    }
    if (likely(ptb->vaddr2 == -1)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}

void plugin_gen_tb_end(CPUState *cpu)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    int i;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);

    /* clean up */
    for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
        if (ptb->cbs[i]) {
            g_array_set_size(ptb->cbs[i], 0);
        }
    }
    ptb->n = 0;
    tcg_ctx->plugin_insn = NULL;
}