/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif
/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for mem callback that is a write, 0 otherwise.
 */
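
/*
 * Illustrative sketch (not literal TCG output): each empty event is
 * bracketed by a pair of marker ops,
 *
 *   plugin_cb_start <from> <type> <wr>
 *     ... ops generated for the empty callback ...
 *   plugin_cb_end
 *
 * so that plugin_gen_inject() can later locate every plugin_cb_start
 * marker and either fill in or remove everything up to the matching
 * plugin_cb_end.
 */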
enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};
/*
 * These helpers are stubs that get dynamically switched out for calls
 * direct to the plugin if they are subscribed to.
 */
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *udata)
{ }
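
/*
 * The generators below load the vCPU index with
 *   tcg_gen_ld_i32(cpu_index, cpu_env,
 *                  -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
 * cpu_env points at the ArchCPU.env member, so subtracting
 * offsetof(ArchCPU, env) rebases the pointer to the start of ArchCPU,
 * whose first field is the CPUState; from there offsetof(CPUState,
 * cpu_index) reaches the per-vCPU index.
 */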
static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_i32 meminfo = tcg_const_i32(info);
    TCGv_i64 vaddr64 = tcg_temp_new_i64();
    TCGv_ptr udata = tcg_const_ptr(NULL);

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_extu_tl_i64(vaddr64, vaddr);

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i64(vaddr64);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}
static void gen_empty_udata_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}
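
/*
 * Rough shape of the ops the template above is expected to emit on a
 * 64-bit host: a mov of the NULL userdata constant, a ld_i32 of the vCPU
 * index, and a call to the empty helper. append_udata_cb() copies exactly
 * this pattern, so the two must be kept in sync.
 */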
/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
static void gen_empty_inline_cb(void)
{
    TCGv_i64 val = tcg_temp_new_i64();
    TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */

    tcg_gen_ld_i64(val, ptr, 0);
    /* pass an immediate != 0 so that it doesn't get optimized away */
    tcg_gen_addi_i64(val, val, 0xdeadface);
    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
}
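
/*
 * append_inline_cb() later rewrites this template: the NULL pointer
 * becomes the plugin-supplied target pointer (cb->userp), the 0xdeadface
 * placeholder becomes the requested increment (cb->inline_insn.imm), and
 * the ld/add/st triple is copied as-is.
 */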
static void gen_empty_mem_cb(TCGv addr, uint32_t info)
{
    do_gen_mem_cb(addr, info);
}
/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr;

    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}
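
/*
 * inject_mem_enable_helper() later overwrites the NULL above with a
 * pointer to the copied callback array; the disable case keeps storing
 * NULL, clearing CPUState.plugin_mem_cbs again after the instruction.
 */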
void gen_plugin_cb_start(enum plugin_gen_from from,
                         enum plugin_gen_cb type, unsigned wr)
{
    TCGOp *op;

    tcg_gen_plugin_cb_start(from, type, wr);
    op = tcg_last_op();
    QSIMPLEQ_INSERT_TAIL(&tcg_ctx->plugin_ops, op, plugin_link);
}

static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}
static inline void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}
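
/*
 * plugin_gen_tb_start(), plugin_gen_insn_start() and plugin_gen_insn_end()
 * below call this with FROM_TB, FROM_INSN and AFTER_INSN respectively;
 * the FROM_MEM templates are instead emitted on demand via
 * plugin_gen_empty_mem_callback().
 */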
union mem_gen_fn {
    void (*mem_fn)(TCGv, uint32_t);
    void (*inline_fn)(void);
};
static void gen_mem_wrapped(enum plugin_gen_cb type,
                            const union mem_gen_fn *f, TCGv addr,
                            uint32_t info, bool is_mem)
{
    int wr = !!(info & TRACE_MEM_ST);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr);
    if (is_mem) {
        f->mem_fn(addr, info);
    } else {
        f->inline_fn();
    }
    tcg_gen_plugin_cb_end();
}
void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
{
    union mem_gen_fn fn;

    fn.mem_fn = gen_empty_mem_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);

    fn.inline_fn = gen_empty_inline_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
}
static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}
static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}
/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}
static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    *begin_op = QTAILQ_NEXT(*begin_op, link);
    tcg_debug_assert(*begin_op);
    op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc);
    memcpy(op->args, (*begin_op)->args, sizeof(op->args));
    return op;
}
static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}
static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* mov_i32 (low half) + mov_i32 (zero high half) */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* extu_i32_i64 */
        op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
    }
    return op;
}
static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
    }
    return op;
}
static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}
static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TARGET_LONG_BITS == 32) {
        /* extu_i32_i64 */
        op = copy_extu_i32_i64(begin_op, op);
    } else {
        /* mov_i64 */
        op = copy_mov_i64(begin_op, op);
    }
    return op;
}
static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}
static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}
static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}
static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
                        void *func, unsigned tcg_flags, int *cb_idx)
{
    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    op->param1 = (*begin_op)->param1;
    op->param2 = (*begin_op)->param2;
    tcg_debug_assert(op->life == 0);
    if (*cb_idx == -1) {
        int i;

        /*
         * Instead of working out the position of the callback in args[], just
         * look for @empty_func, since it should be a unique pointer.
         */
        for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) {
            if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) {
                *cb_idx = i;
                break;
            }
        }
        tcg_debug_assert(i < MAX_OPC_PARAM_ARGS);
    }
    op->args[*cb_idx] = (uintptr_t)func;
    op->args[*cb_idx + 1] = tcg_flags;

    return op;
}
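
/*
 * Callers cache cb_idx (starting at -1), so the args[] scan for
 * @empty_func only happens for the first callback injected for an event;
 * subsequent callbacks reuse the index found the first time around.
 */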
/*
 * When we append/replace ops here we are sensitive to changing patterns of
 * TCGOps generated by the tcg_gen_FOO calls when we generated the
 * empty callbacks. This will assert very quickly in a debug build as
 * we assert the ops we are replacing are the correct ones.
 */
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* call */
    op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
                   cb->f.vcpu_udata, cb->tcg_flags, cb_idx);

    return op;
}
static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* ld_i64 */
    op = copy_ld_i64(&begin_op, op);

    /* add_i64 */
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);

    /* st_i64 */
    op = copy_st_i64(&begin_op, op);

    return op;
}
static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* extu_tl_i64 */
    op = copy_extu_tl_i64(&begin_op, op);

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
                       cb->f.vcpu_udata, cb->tcg_flags, cb_idx);
    }

    return op;
}
typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}

static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & (w + 1));
}
void inject_cb_type(const GArray *cbs, TCGOp *begin_op, inject_fn inject,
                    op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}
static void inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}
/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}
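
/*
 * With arr == NULL (the disable case) the copied store simply writes NULL
 * back to CPUState.plugin_mem_cbs, which is what the empty helper template
 * already did.
 */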
/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of it so that they can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
 * that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs = 0;
    size_t i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}
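
/*
 * The copied descriptor array is handed to qemu_plugin_add_dyn_cb_arr()
 * rather than freed here, because the generated code keeps pointing at it
 * for the lifetime of the TB; see the comment above about freeing.
 */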
static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    TCGv_ptr ptr;

    if (likely(tcg_ctx->plugin_insn == NULL ||
               !tcg_ctx->plugin_insn->mem_helper)) {
        return;
    }
    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
    tcg_ctx->plugin_insn->mem_helper = false;
}
static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_mem_enable_helper(insn, begin_op);
}

static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_mem_disable_helper(insn, begin_op);
}
static void plugin_inject_cb(const struct qemu_plugin_tb *ptb, TCGOp *begin_op,
                             int insn_idx)
{
    enum plugin_gen_from from = begin_op->args[0];
    enum plugin_gen_cb type = begin_op->args[1];

    switch (from) {
    case PLUGIN_GEN_FROM_TB:
        switch (type) {
        case PLUGIN_GEN_CB_UDATA:
            plugin_gen_tb_udata(ptb, begin_op);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_tb_inline(ptb, begin_op);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_FROM_INSN:
        switch (type) {
        case PLUGIN_GEN_CB_UDATA:
            plugin_gen_insn_udata(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_insn_inline(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_ENABLE_MEM_HELPER:
            plugin_gen_enable_mem_helper(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_FROM_MEM:
        switch (type) {
        case PLUGIN_GEN_CB_MEM:
            plugin_gen_mem_regular(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_mem_inline(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_AFTER_INSN:
        switch (type) {
        case PLUGIN_GEN_DISABLE_MEM_HELPER:
            plugin_gen_disable_mem_helper(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}
static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx = -1;

    pr_ops();

    QSIMPLEQ_FOREACH(op, &tcg_ctx->plugin_ops, plugin_link) {
        enum plugin_gen_from from = op->args[0];
        enum plugin_gen_cb type = op->args[1];

        tcg_debug_assert(op->opc == INDEX_op_plugin_cb_start);
        /* ENABLE_MEM_HELPER is the first callback of an instruction */
        if (from == PLUGIN_GEN_FROM_INSN &&
            type == PLUGIN_GEN_ENABLE_MEM_HELPER) {
            insn_idx++;
        }
        plugin_inject_cb(plugin_tb, op, insn_idx);
    }

    pr_ops();
}
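
/*
 * Note that insn_idx starts at -1 and is only advanced when the
 * ENABLE_MEM_HELPER marker of a new instruction is seen, which is why
 * plugin_gen_empty_callback() must emit that marker first for each insn.
 */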
bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
        ret = true;

        QSIMPLEQ_INIT(&tcg_ctx->plugin_ops);

        ptb->vaddr = tb->pc;
        ptb->vaddr2 = -1;
        get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1);
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }
    return ret;
}
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb);
    tcg_ctx->plugin_insn = pinsn;
    pinsn->vaddr = db->pc_next;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) &&
        unlikely((db->pc_next & TARGET_PAGE_MASK) !=
                 (db->pc_first & TARGET_PAGE_MASK))) {
        get_page_addr_code_hostp(cpu->env_ptr, db->pc_next,
                                 &ptb->haddr2);
        ptb->vaddr2 = db->pc_next;
    }
    if (likely(ptb->vaddr2 == -1)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}
void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}
void plugin_gen_tb_end(CPUState *cpu)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    int i;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);

    /* clean up */
    for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
        if (ptb->cbs[i]) {
            g_array_set_size(ptb->cbs[i], 0);
        }
    }

    tcg_ctx->plugin_insn = NULL;
}