/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
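/*
 * To make the flow concrete, here is a rough sketch (illustration only, not
 * part of this file) of the plugin side that drives all of this, using the
 * public API from include/qemu/qemu-plugin.h; the names "insn_exec" and
 * "tb_trans" are made up for the example:
 *
 *     static void insn_exec(unsigned int vcpu_index, void *udata)
 *     {
 *         ...    (runs once per execution of the instrumented instruction)
 *     }
 *
 *     static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *     {
 *         size_t i;
 *
 *         for (i = 0; i < qemu_plugin_tb_n_insns(tb); i++) {
 *             struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *             qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec,
 *                                                    QEMU_PLUGIN_CB_NO_REGS,
 *                                                    NULL);
 *         }
 *     }
 *
 * tb_trans itself would be registered from the plugin's qemu_plugin_install()
 * via qemu_plugin_register_vcpu_tb_trans_cb(). qemu_plugin_tb_trans_cb() (see
 * plugin_gen_tb_end() below) invokes it, and the requests it records are what
 * plugin_gen_inject() later fills into the empty callbacks emitted during
 * translation.
 */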
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif
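
/*
 * CONFIG_SOFTMMU_GATE above provides a constant (0/1) form of CONFIG_SOFTMMU,
 * so that softmmu-only logic can, if needed, be gated with a regular C
 * conditional that the compiler still parses, rather than an #ifdef.
 */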

/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for mem callback that is a write, 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};

/*
 * These helpers are stubs that get dynamically switched out for calls
 * directly to the plugin if they are subscribed to.
 */
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }
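
/*
 * Emit the skeleton of a memory callback: load the vCPU index, extend the
 * guest address to 64 bits and call the (still empty) mem helper with a
 * NULL userdata pointer; the helper and userdata are patched in later.
 */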
static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_i32 meminfo = tcg_const_i32(info);
    TCGv_i64 vaddr64 = tcg_temp_new_i64();
    TCGv_ptr udata = tcg_const_ptr(NULL);

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_extu_tl_i64(vaddr64, vaddr);

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i64(vaddr64);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}
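
/*
 * Emit the skeleton of a userdata callback: load the vCPU index and call the
 * empty udata helper with a NULL pointer; both are replaced when a plugin's
 * callbacks are injected.
 */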
static void gen_empty_udata_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}

/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
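/*
 * The empty inline callback below models the TCG that a plugin's inline
 * request turns into. For illustration (from a plugin, not from this file),
 * such a request would look roughly like:
 *
 *     qemu_plugin_register_vcpu_insn_exec_inline(insn,
 *                                                QEMU_PLUGIN_INLINE_ADD_U64,
 *                                                &counter, 1);
 *
 * i.e. "load a u64 from a pointer, add an immediate, store it back", which
 * is exactly the ld_i64/addi_i64/st_i64 sequence generated here.
 */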
static void gen_empty_inline_cb(void)
{
    TCGv_i64 val = tcg_temp_new_i64();
    TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */

    tcg_gen_ld_i64(val, ptr, 0);
    /* pass an immediate != 0 so that it doesn't get optimized away */
    tcg_gen_addi_i64(val, val, 0xdeadface);
    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
}

static void gen_empty_mem_cb(TCGv addr, uint32_t info)
{
    do_gen_mem_cb(addr, info);
}

/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr;

    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}

static inline
void gen_plugin_cb_start(enum plugin_gen_from from,
                         enum plugin_gen_cb type, unsigned wr)
{
    TCGOp *op;

    tcg_gen_plugin_cb_start(from, type, wr);
    op = tcg_last_op();
    QSIMPLEQ_INSERT_TAIL(&tcg_ctx->plugin_ops, op, plugin_link);
}
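
/* Bracket the ops emitted by @func with plugin_cb_start / plugin_cb_end. */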
static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}

static inline void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}
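
/*
 * Memory accesses get both an empty dispatch callback and an empty inline
 * callback; this union lets gen_mem_wrapped() emit either kind.
 */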
union mem_gen_fn {
    void (*mem_fn)(TCGv, uint32_t);
    void (*inline_fn)(void);
};

static void gen_mem_wrapped(enum plugin_gen_cb type,
                            const union mem_gen_fn *f, TCGv addr,
                            uint32_t info, bool is_mem)
{
    int wr = !!(info & TRACE_MEM_ST);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr);
    if (is_mem) {
        f->mem_fn(addr, info);
    } else {
        f->inline_fn();
    }
    tcg_gen_plugin_cb_end();
}
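
/*
 * Called from the memory-access code generator for every guest load/store:
 * emit one empty dispatch callback and one empty inline callback, so that
 * either kind of plugin request can be filled in later.
 */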
void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
{
    union mem_gen_fn fn;

    fn.mem_fn = gen_empty_mem_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);

    fn.inline_fn = gen_empty_inline_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
}
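
/* Find the next op with opcode @opc, starting at (and including) @op. */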
static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}

static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    *begin_op = QTAILQ_NEXT(*begin_op, link);
    tcg_debug_assert(*begin_op);
    op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc);
    memcpy(op->args, (*begin_op)->args, sizeof(op->args));
    return op;
}

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}
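
/*
 * On 32-bit hosts a 64-bit TCG value lives in a pair of i32 temps, so the
 * empty callback's 64-bit ops appear there as two 32-bit ops. The copy_*
 * helpers below mirror whatever pattern tcg_gen_FOO emitted for the host.
 */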
static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        /* mov_i32 w/ $0 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* extu_i32_i64 */
        op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
    }
    return op;
}

static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
    }
    return op;
}

static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr));
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
        op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr));
    }
    return op;
}

static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TARGET_LONG_BITS == 32) {
        /* extu_i32_i64 */
        op = copy_extu_i32_i64(begin_op, op);
    } else {
        /* mov_i64 */
        op = copy_mov_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
        op->args[4] = tcgv_i32_arg(tcg_constant_i32(v));
        op->args[5] = tcgv_i32_arg(tcg_constant_i32(v >> 32));
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
        op->args[2] = tcgv_i64_arg(tcg_constant_i64(v));
    }
    return op;
}

static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
                        void *func, unsigned tcg_flags, int *cb_idx)
{
    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    op->param1 = (*begin_op)->param1;
    op->param2 = (*begin_op)->param2;
    tcg_debug_assert(op->life == 0);
    if (*cb_idx == -1) {
        int i;

        /*
         * Instead of working out the position of the callback in args[], just
         * look for @empty_func, since it should be a unique pointer.
         */
        for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) {
            if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) {
                *cb_idx = i;
                break;
            }
        }
        tcg_debug_assert(i < MAX_OPC_PARAM_ARGS);
    }
    op->args[*cb_idx] = (uintptr_t)func;
    op->args[*cb_idx + 1] = tcg_flags;

    return op;
}

/*
 * When we append/replace ops here we are sensitive to changing patterns of
 * TCGOps generated by the tcg_gen_FOO calls when we generated the
 * empty callbacks. This will assert very quickly in a debug build as
 * we assert the ops we are replacing are the correct ones.
 */
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* call */
    op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
                   cb->f.vcpu_udata, cb->tcg_flags, cb_idx);

    return op;
}

static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* ld_i64 */
    op = copy_ld_i64(&begin_op, op);

    /* add_i64 */
    op = copy_add_i64(&begin_op, op, cb->inline_insn.imm);

    /* st_i64 */
    op = copy_st_i64(&begin_op, op);

    return op;
}

static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == mov_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_mov_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* extu_tl_i64 */
    op = copy_extu_tl_i64(&begin_op, op);

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
                       cb->f.vcpu_udata, cb->tcg_flags, cb_idx);
    }

    return op;
}

typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}
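
/*
 * args[2] of plugin_cb_start is 1 for a guest write and 0 for a read, while
 * cb->rw is a QEMU_PLUGIN_MEM_RW mask (R == 1, W == 2), so (w + 1) selects
 * the matching bit.
 */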
static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & (w + 1));
}
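
/*
 * Replace an empty callback with one filled-in copy per registered callback
 * that passes the @ok filter, then remove the empty template ops. If there
 * are no callbacks at all, just delete the template.
 */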
static inline
void inject_cb_type(const GArray *cbs, TCGOp *begin_op, inject_fn inject,
                    op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}

static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}

/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}

/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of the copy so that it can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
 * that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs, i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    n_cbs = 0;
    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}

static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    TCGv_ptr ptr;

    if (likely(tcg_ctx->plugin_insn == NULL ||
               !tcg_ctx->plugin_insn->mem_helper)) {
        return;
    }
    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
    tcg_ctx->plugin_insn->mem_helper = false;
}
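
/*
 * The plugin_gen_* dispatchers below take one recorded plugin_cb_start op and
 * inject the callbacks registered for the TB, instruction or memory access
 * it belongs to.
 */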
static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_enable_helper(insn, begin_op);
}

static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_disable_helper(insn, begin_op);
}
static void plugin_inject_cb(const struct qemu_plugin_tb *ptb, TCGOp *begin_op,
                             int insn_idx)
{
    enum plugin_gen_from from = begin_op->args[0];
    enum plugin_gen_cb type = begin_op->args[1];

    switch (from) {
    case PLUGIN_GEN_FROM_TB:
        switch (type) {
        case PLUGIN_GEN_CB_UDATA:
            plugin_gen_tb_udata(ptb, begin_op);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_tb_inline(ptb, begin_op);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_FROM_INSN:
        switch (type) {
        case PLUGIN_GEN_CB_UDATA:
            plugin_gen_insn_udata(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_insn_inline(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_ENABLE_MEM_HELPER:
            plugin_gen_enable_mem_helper(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_FROM_MEM:
        switch (type) {
        case PLUGIN_GEN_CB_MEM:
            plugin_gen_mem_regular(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_mem_inline(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_AFTER_INSN:
        switch (type) {
        case PLUGIN_GEN_DISABLE_MEM_HELPER:
            plugin_gen_disable_mem_helper(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}
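
/*
 * Walk the plugin_cb_start ops recorded during translation and replace each
 * one with the instrumentation requested for it (or remove it if there is
 * none). insn_idx tracks which guest instruction we are currently in.
 */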
static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx;

    pr_ops();
    insn_idx = -1;
    QSIMPLEQ_FOREACH(op, &tcg_ctx->plugin_ops, plugin_link) {
        enum plugin_gen_from from = op->args[0];
        enum plugin_gen_cb type = op->args[1];

        tcg_debug_assert(op->opc == INDEX_op_plugin_cb_start);
        /* ENABLE_MEM_HELPER is the first callback of an instruction */
        if (from == PLUGIN_GEN_FROM_INSN &&
            type == PLUGIN_GEN_ENABLE_MEM_HELPER) {
            insn_idx++;
        }
        plugin_inject_cb(plugin_tb, op, insn_idx);
    }
    pr_ops();
}

bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
        ret = true;

        QSIMPLEQ_INIT(&tcg_ctx->plugin_ops);
        ptb->vaddr = tb->pc;
        ptb->vaddr2 = -1;
        get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1);
        ptb->haddr2 = NULL;
        ptb->mem_only = mem_only;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }
    return ret;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb);
    tcg_ctx->plugin_insn = pinsn;
    pinsn->vaddr = db->pc_next;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) &&
        unlikely((db->pc_next & TARGET_PAGE_MASK) !=
                 (db->pc_first & TARGET_PAGE_MASK))) {
        get_page_addr_code_hostp(cpu->env_ptr, db->pc_next,
                                 &ptb->haddr2);
        ptb->vaddr2 = db->pc_next;
    }
    if (likely(ptb->vaddr2 == -1)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}
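
/*
 * Called once the whole TB has been translated: hand the TB to the plugins
 * so they can register their callbacks, inject the requested instrumentation
 * into the op stream, and reset the per-TB state for the next translation.
 */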
void plugin_gen_tb_end(CPUState *cpu)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    int i;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);

    /* clean up */
    for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
        if (ptb->cbs[i]) {
            g_array_set_size(ptb->cbs[i], 0);
        }
    }
    ptb->n = 0;
    tcg_ctx->plugin_insn = NULL;
}