tcg: Store the TCGHelperInfo in the TCGOp for call
[qemu/ar7.git] / tcg / tcg.c
blob 0dc99cc65b85ebeba9e86d6ce03c18cef911c308
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 /* define it to use liveness analysis (better code) */
26 #define USE_TCG_OPTIMIZATIONS
28 #include "qemu/osdep.h"
30 /* Define to dump the ELF file used to communicate with GDB. */
31 #undef DEBUG_JIT
33 #include "qemu/error-report.h"
34 #include "qemu/cutils.h"
35 #include "qemu/host-utils.h"
36 #include "qemu/qemu-print.h"
37 #include "qemu/timer.h"
38 #include "qemu/cacheflush.h"
40 /* Note: the long term plan is to reduce the dependencies on the QEMU
41 CPU definitions. Currently they are used for qemu_ld/st
42 instructions */
43 #define NO_CPU_IO_DEFS
45 #include "exec/exec-all.h"
46 #include "tcg/tcg-op.h"
48 #if UINTPTR_MAX == UINT32_MAX
49 # define ELF_CLASS ELFCLASS32
50 #else
51 # define ELF_CLASS ELFCLASS64
52 #endif
53 #ifdef HOST_WORDS_BIGENDIAN
54 # define ELF_DATA ELFDATA2MSB
55 #else
56 # define ELF_DATA ELFDATA2LSB
57 #endif
59 #include "elf.h"
60 #include "exec/log.h"
61 #include "tcg-internal.h"
63 /* Forward declarations for functions declared in tcg-target.c.inc and
64 used here. */
65 static void tcg_target_init(TCGContext *s);
66 static void tcg_target_qemu_prologue(TCGContext *s);
67 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
68 intptr_t value, intptr_t addend);
70 /* The CIE and FDE header definitions will be common to all hosts. */
71 typedef struct {
72 uint32_t len __attribute__((aligned((sizeof(void *)))));
73 uint32_t id;
74 uint8_t version;
75 char augmentation[1];
76 uint8_t code_align;
77 uint8_t data_align;
78 uint8_t return_column;
79 } DebugFrameCIE;
81 typedef struct QEMU_PACKED {
82 uint32_t len __attribute__((aligned((sizeof(void *)))));
83 uint32_t cie_offset;
84 uintptr_t func_start;
85 uintptr_t func_len;
86 } DebugFrameFDEHeader;
88 typedef struct QEMU_PACKED {
89 DebugFrameCIE cie;
90 DebugFrameFDEHeader fde;
91 } DebugFrameHeader;
93 static void tcg_register_jit_int(const void *buf, size_t size,
94 const void *debug_frame,
95 size_t debug_frame_size)
96 __attribute__((unused));
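/*
 * Hedged background note: these DebugFrame structures form the header of the
 * DWARF call-frame information bundled with the in-memory ELF image that
 * tcg_register_jit_int() hands to the debugger (see DEBUG_JIT above).  A
 * backend typically appends its host-specific CFI opcode bytes to a static
 * DebugFrameHeader and registers it once the code buffer is known; buf_rx
 * and buf_size below are placeholders for that buffer, not names from this
 * file:
 *
 *     tcg_register_jit_int(buf_rx, buf_size, &debug_frame, sizeof(debug_frame));
 */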
98 /* Forward declarations for functions declared and used in tcg-target.c.inc. */
99 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
100 intptr_t arg2);
101 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
102 static void tcg_out_movi(TCGContext *s, TCGType type,
103 TCGReg ret, tcg_target_long arg);
104 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
105 const TCGArg args[TCG_MAX_OP_ARGS],
106 const int const_args[TCG_MAX_OP_ARGS]);
107 #if TCG_TARGET_MAYBE_vec
108 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
109 TCGReg dst, TCGReg src);
110 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
111 TCGReg dst, TCGReg base, intptr_t offset);
112 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
113 TCGReg dst, int64_t arg);
114 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
115 unsigned vecl, unsigned vece,
116 const TCGArg args[TCG_MAX_OP_ARGS],
117 const int const_args[TCG_MAX_OP_ARGS]);
118 #else
119 static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
120 TCGReg dst, TCGReg src)
122 g_assert_not_reached();
124 static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
125 TCGReg dst, TCGReg base, intptr_t offset)
127 g_assert_not_reached();
129 static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
130 TCGReg dst, int64_t arg)
132 g_assert_not_reached();
134 static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
135 unsigned vecl, unsigned vece,
136 const TCGArg args[TCG_MAX_OP_ARGS],
137 const int const_args[TCG_MAX_OP_ARGS])
139 g_assert_not_reached();
141 #endif
142 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
143 intptr_t arg2);
144 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
145 TCGReg base, intptr_t ofs);
146 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target);
147 static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
148 #ifdef TCG_TARGET_NEED_LDST_LABELS
149 static int tcg_out_ldst_finalize(TCGContext *s);
150 #endif
152 TCGContext tcg_init_ctx;
153 __thread TCGContext *tcg_ctx;
155 TCGContext **tcg_ctxs;
156 unsigned int tcg_cur_ctxs;
157 unsigned int tcg_max_ctxs;
158 TCGv_env cpu_env = 0;
159 const void *tcg_code_gen_epilogue;
160 uintptr_t tcg_splitwx_diff;
162 #ifndef CONFIG_TCG_INTERPRETER
163 tcg_prologue_fn *tcg_qemu_tb_exec;
164 #endif
166 static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
167 static TCGRegSet tcg_target_call_clobber_regs;
169 #if TCG_TARGET_INSN_UNIT_SIZE == 1
170 static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
172 *s->code_ptr++ = v;
175 static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
176 uint8_t v)
178 *p = v;
180 #endif
182 #if TCG_TARGET_INSN_UNIT_SIZE <= 2
183 static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
185 if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
186 *s->code_ptr++ = v;
187 } else {
188 tcg_insn_unit *p = s->code_ptr;
189 memcpy(p, &v, sizeof(v));
190 s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
194 static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
195 uint16_t v)
197 if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
198 *p = v;
199 } else {
200 memcpy(p, &v, sizeof(v));
203 #endif
205 #if TCG_TARGET_INSN_UNIT_SIZE <= 4
206 static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
208 if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
209 *s->code_ptr++ = v;
210 } else {
211 tcg_insn_unit *p = s->code_ptr;
212 memcpy(p, &v, sizeof(v));
213 s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
217 static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
218 uint32_t v)
220 if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
221 *p = v;
222 } else {
223 memcpy(p, &v, sizeof(v));
226 #endif
228 #if TCG_TARGET_INSN_UNIT_SIZE <= 8
229 static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
231 if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
232 *s->code_ptr++ = v;
233 } else {
234 tcg_insn_unit *p = s->code_ptr;
235 memcpy(p, &v, sizeof(v));
236 s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
240 static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
241 uint64_t v)
243 if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
244 *p = v;
245 } else {
246 memcpy(p, &v, sizeof(v));
249 #endif
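/*
 * Summary of the emitters above, with one illustrative call: tcg_out8/16/32/64
 * append a value to s->code_ptr in units of tcg_insn_unit, using memcpy() when
 * the value spans several (possibly unaligned) units, while the tcg_patch*
 * variants overwrite units that were already emitted and are the building
 * blocks for patch_reloc().  A backend whose instructions are 32-bit words
 * emits one instruction simply as:
 *
 *     tcg_out32(s, insn);
 */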
251 /* label relocation processing */
253 static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
254 TCGLabel *l, intptr_t addend)
256 TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
258 r->type = type;
259 r->ptr = code_ptr;
260 r->addend = addend;
261 QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
264 static void tcg_out_label(TCGContext *s, TCGLabel *l)
266 tcg_debug_assert(!l->has_value);
267 l->has_value = 1;
268 l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
271 TCGLabel *gen_new_label(void)
273 TCGContext *s = tcg_ctx;
274 TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
276 memset(l, 0, sizeof(TCGLabel));
277 l->id = s->nb_labels++;
278 QSIMPLEQ_INIT(&l->relocs);
280 QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);
282 return l;
285 static bool tcg_resolve_relocs(TCGContext *s)
287 TCGLabel *l;
289 QSIMPLEQ_FOREACH(l, &s->labels, next) {
290 TCGRelocation *r;
291 uintptr_t value = l->u.value;
293 QSIMPLEQ_FOREACH(r, &l->relocs, next) {
294 if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
295 return false;
299 return true;
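/*
 * A hedged sketch of how the relocation pieces fit together (the branch
 * emitter and relocation type below are hypothetical; only tcg_out_reloc,
 * tcg_out_label, patch_reloc and tcg_resolve_relocs are real): a backend
 * branching to a label that is not yet bound records a relocation and emits
 * a placeholder.  Once all code is generated, tcg_resolve_relocs() walks
 * each label's relocation list and patch_reloc() rewrites the recorded
 * sites; a displacement that does not fit makes it return false so the
 * caller can retry with a smaller translation block.
 *
 *     static void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
 *     {
 *         if (l->has_value) {
 *             tcg_out_branch(s, l->u.value_ptr);
 *         } else {
 *             tcg_out_reloc(s, s->code_ptr, R_HYPOTHETICAL_BR, l, 0);
 *             tcg_out_branch(s, NULL);    placeholder, patched later
 *         }
 *     }
 */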
302 static void set_jmp_reset_offset(TCGContext *s, int which)
305 * We will check for overflow at the end of the opcode loop in
306 * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
308 s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
311 /* Signal overflow, starting over with fewer guest insns. */
312 static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s)
314 siglongjmp(s->jmp_trans, -2);
317 #define C_PFX1(P, A) P##A
318 #define C_PFX2(P, A, B) P##A##_##B
319 #define C_PFX3(P, A, B, C) P##A##_##B##_##C
320 #define C_PFX4(P, A, B, C, D) P##A##_##B##_##C##_##D
321 #define C_PFX5(P, A, B, C, D, E) P##A##_##B##_##C##_##D##_##E
322 #define C_PFX6(P, A, B, C, D, E, F) P##A##_##B##_##C##_##D##_##E##_##F
324 /* Define an enumeration for the various combinations. */
326 #define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1),
327 #define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2),
328 #define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3),
329 #define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4),
331 #define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1),
332 #define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2),
333 #define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3),
334 #define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),
336 #define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2),
338 #define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1),
339 #define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2),
340 #define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
341 #define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
343 typedef enum {
344 #include "tcg-target-con-set.h"
345 } TCGConstraintSetIndex;
347 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
349 #undef C_O0_I1
350 #undef C_O0_I2
351 #undef C_O0_I3
352 #undef C_O0_I4
353 #undef C_O1_I1
354 #undef C_O1_I2
355 #undef C_O1_I3
356 #undef C_O1_I4
357 #undef C_N1_I2
358 #undef C_O2_I1
359 #undef C_O2_I2
360 #undef C_O2_I3
361 #undef C_O2_I4
363 /* Put all of the constraint sets into an array, indexed by the enum. */
365 #define C_O0_I1(I1) { .args_ct_str = { #I1 } },
366 #define C_O0_I2(I1, I2) { .args_ct_str = { #I1, #I2 } },
367 #define C_O0_I3(I1, I2, I3) { .args_ct_str = { #I1, #I2, #I3 } },
368 #define C_O0_I4(I1, I2, I3, I4) { .args_ct_str = { #I1, #I2, #I3, #I4 } },
370 #define C_O1_I1(O1, I1) { .args_ct_str = { #O1, #I1 } },
371 #define C_O1_I2(O1, I1, I2) { .args_ct_str = { #O1, #I1, #I2 } },
372 #define C_O1_I3(O1, I1, I2, I3) { .args_ct_str = { #O1, #I1, #I2, #I3 } },
373 #define C_O1_I4(O1, I1, I2, I3, I4) { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },
375 #define C_N1_I2(O1, I1, I2) { .args_ct_str = { "&" #O1, #I1, #I2 } },
377 #define C_O2_I1(O1, O2, I1) { .args_ct_str = { #O1, #O2, #I1 } },
378 #define C_O2_I2(O1, O2, I1, I2) { .args_ct_str = { #O1, #O2, #I1, #I2 } },
379 #define C_O2_I3(O1, O2, I1, I2, I3) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
380 #define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
382 static const TCGTargetOpDef constraint_sets[] = {
383 #include "tcg-target-con-set.h"
387 #undef C_O0_I1
388 #undef C_O0_I2
389 #undef C_O0_I3
390 #undef C_O0_I4
391 #undef C_O1_I1
392 #undef C_O1_I2
393 #undef C_O1_I3
394 #undef C_O1_I4
395 #undef C_N1_I2
396 #undef C_O2_I1
397 #undef C_O2_I2
398 #undef C_O2_I3
399 #undef C_O2_I4
401 /* Expand the enumerator to be returned from tcg_target_op_def(). */
403 #define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1)
404 #define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2)
405 #define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3)
406 #define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4)
408 #define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1)
409 #define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2)
410 #define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3)
411 #define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)
413 #define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2)
415 #define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1)
416 #define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2)
417 #define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
418 #define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
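/*
 * Worked example (the concrete set is only for illustration): a line such as
 *
 *     C_O1_I2(r, r, ri)
 *
 * in tcg-target-con-set.h is expanded three times by the machinery above:
 * first into the enumerator c_o1_i2_r_r_ri of TCGConstraintSetIndex, then
 * into the table entry { .args_ct_str = { "r", "r", "ri" } } of
 * constraint_sets[], and finally the same macro name is what the target's
 * tcg_target_op_def() returns, so each enumerator indexes its own entry.
 */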
420 #include "tcg-target.c.inc"
422 static void alloc_tcg_plugin_context(TCGContext *s)
424 #ifdef CONFIG_PLUGIN
425 s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
426 s->plugin_tb->insns =
427 g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
428 #endif
432  * All TCG threads except the parent (i.e. the one that called tcg_context_init
433  * and registered the target's TCG globals) must register with this function
434  * before initiating translation.
436  * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
437  * of tcg_region_init() for the reasoning behind this.
439  * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
440  * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
441  * is not used anymore for translation once this function is called.
443  * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
444  * over the array (e.g. tcg_code_size()) the same for both softmmu and user-mode.
446 #ifdef CONFIG_USER_ONLY
447 void tcg_register_thread(void)
449 tcg_ctx = &tcg_init_ctx;
451 #else
452 void tcg_register_thread(void)
454 TCGContext *s = g_malloc(sizeof(*s));
455 unsigned int i, n;
457 *s = tcg_init_ctx;
459 /* Relink mem_base. */
460 for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
461 if (tcg_init_ctx.temps[i].mem_base) {
462 ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
463 tcg_debug_assert(b >= 0 && b < n);
464 s->temps[i].mem_base = &s->temps[b];
468 /* Claim an entry in tcg_ctxs */
469 n = qatomic_fetch_inc(&tcg_cur_ctxs);
470 g_assert(n < tcg_max_ctxs);
471 qatomic_set(&tcg_ctxs[n], s);
473 if (n > 0) {
474 alloc_tcg_plugin_context(s);
475 tcg_region_initial_alloc(s);
478 tcg_ctx = s;
480 #endif /* !CONFIG_USER_ONLY */
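/*
 * Illustrative note: in softmmu builds each vCPU thread is expected to call
 * tcg_register_thread() once, before its first translation, e.g. near the
 * top of its thread function (the exact call site lives outside this file):
 *
 *     tcg_register_thread();
 *     ... translation loop ...
 *
 * The call clones tcg_init_ctx, re-links the mem_base pointers of the
 * globals, claims a slot in tcg_ctxs[] and allocates an initial region; in
 * user-mode it merely points tcg_ctx at tcg_init_ctx.
 */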
482 /* pool based memory allocation */
483 void *tcg_malloc_internal(TCGContext *s, int size)
485 TCGPool *p;
486 int pool_size;
488 if (size > TCG_POOL_CHUNK_SIZE) {
489 /* big malloc: insert a new pool (XXX: could optimize) */
490 p = g_malloc(sizeof(TCGPool) + size);
491 p->size = size;
492 p->next = s->pool_first_large;
493 s->pool_first_large = p;
494 return p->data;
495 } else {
496 p = s->pool_current;
497 if (!p) {
498 p = s->pool_first;
499 if (!p)
500 goto new_pool;
501 } else {
502 if (!p->next) {
503 new_pool:
504 pool_size = TCG_POOL_CHUNK_SIZE;
505 p = g_malloc(sizeof(TCGPool) + pool_size);
506 p->size = pool_size;
507 p->next = NULL;
508 if (s->pool_current)
509 s->pool_current->next = p;
510 else
511 s->pool_first = p;
512 } else {
513 p = p->next;
517 s->pool_current = p;
518 s->pool_cur = p->data + size;
519 s->pool_end = p->data + p->size;
520 return p->data;
523 void tcg_pool_reset(TCGContext *s)
525 TCGPool *p, *t;
526 for (p = s->pool_first_large; p; p = t) {
527 t = p->next;
528 g_free(p);
530 s->pool_first_large = NULL;
531 s->pool_cur = s->pool_end = NULL;
532 s->pool_current = NULL;
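/*
 * Usage note (a sketch; the tcg_malloc() fast path itself is declared in
 * tcg.h): the pool allocator above backs tcg_malloc(), which falls back to
 * tcg_malloc_internal() when the current chunk is exhausted.  Allocations
 * are never freed individually; everything allocated this way, e.g. the
 * TCGRelocation and TCGLabel objects created above, lives until
 * tcg_pool_reset() recycles the chunks at the start of the next translation
 * (see tcg_func_start below).
 */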
535 #include "exec/helper-proto.h"
537 static const TCGHelperInfo all_helpers[] = {
538 #include "exec/helper-tcg.h"
540 static GHashTable *helper_table;
542 static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
543 static void process_op_defs(TCGContext *s);
544 static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
545 TCGReg reg, const char *name);
547 static void tcg_context_init(unsigned max_cpus)
549 TCGContext *s = &tcg_init_ctx;
550 int op, total_args, n, i;
551 TCGOpDef *def;
552 TCGArgConstraint *args_ct;
553 TCGTemp *ts;
555 memset(s, 0, sizeof(*s));
556 s->nb_globals = 0;
558 /* Count total number of arguments and allocate the corresponding
559 space */
560 total_args = 0;
561 for(op = 0; op < NB_OPS; op++) {
562 def = &tcg_op_defs[op];
563 n = def->nb_iargs + def->nb_oargs;
564 total_args += n;
567 args_ct = g_new0(TCGArgConstraint, total_args);
569 for(op = 0; op < NB_OPS; op++) {
570 def = &tcg_op_defs[op];
571 def->args_ct = args_ct;
572 n = def->nb_iargs + def->nb_oargs;
573 args_ct += n;
576 /* Register helpers. */
577 /* Use g_direct_hash/equal for direct pointer comparisons on func. */
578 helper_table = g_hash_table_new(NULL, NULL);
580 for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
581 g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
582 (gpointer)&all_helpers[i]);
585 tcg_target_init(s);
586 process_op_defs(s);
588 /* Reverse the order of the saved registers, assuming they're all at
589 the start of tcg_target_reg_alloc_order. */
590 for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
591 int r = tcg_target_reg_alloc_order[n];
592 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
593 break;
596 for (i = 0; i < n; ++i) {
597 indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
599 for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
600 indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
603 alloc_tcg_plugin_context(s);
605 tcg_ctx = s;
607 * In user-mode we simply share the init context among threads, since we
608  * use a single region. See the documentation of tcg_region_init() for the
609 * reasoning behind this.
610 * In softmmu we will have at most max_cpus TCG threads.
612 #ifdef CONFIG_USER_ONLY
613 tcg_ctxs = &tcg_ctx;
614 tcg_cur_ctxs = 1;
615 tcg_max_ctxs = 1;
616 #else
617 tcg_max_ctxs = max_cpus;
618 tcg_ctxs = g_new0(TCGContext *, max_cpus);
619 #endif
621 tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
622 ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
623 cpu_env = temp_tcgv_ptr(ts);
626 void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
628 tcg_context_init(max_cpus);
629 tcg_region_init(tb_size, splitwx, max_cpus);
633 * Allocate TBs right before their corresponding translated code, making
634 * sure that TBs and code are on different cache lines.
636 TranslationBlock *tcg_tb_alloc(TCGContext *s)
638 uintptr_t align = qemu_icache_linesize;
639 TranslationBlock *tb;
640 void *next;
642 retry:
643 tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
644 next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);
646 if (unlikely(next > s->code_gen_highwater)) {
647 if (tcg_region_alloc(s)) {
648 return NULL;
650 goto retry;
652 qatomic_set(&s->code_gen_ptr, next);
653 s->data_gen_ptr = NULL;
654 return tb;
657 void tcg_prologue_init(TCGContext *s)
659 size_t prologue_size;
661 s->code_ptr = s->code_gen_ptr;
662 s->code_buf = s->code_gen_ptr;
663 s->data_gen_ptr = NULL;
665 #ifndef CONFIG_TCG_INTERPRETER
666 tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
667 #endif
669 #ifdef TCG_TARGET_NEED_POOL_LABELS
670 s->pool_labels = NULL;
671 #endif
673 qemu_thread_jit_write();
674 /* Generate the prologue. */
675 tcg_target_qemu_prologue(s);
677 #ifdef TCG_TARGET_NEED_POOL_LABELS
678 /* Allow the prologue to put e.g. guest_base into a pool entry. */
680 int result = tcg_out_pool_finalize(s);
681 tcg_debug_assert(result == 0);
683 #endif
685 prologue_size = tcg_current_code_size(s);
687 #ifndef CONFIG_TCG_INTERPRETER
688 flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
689 (uintptr_t)s->code_buf, prologue_size);
690 #endif
692 tcg_region_prologue_set(s);
694 #ifdef DEBUG_DISAS
695 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
696 FILE *logfile = qemu_log_lock();
697 qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
698 if (s->data_gen_ptr) {
699 size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
700 size_t data_size = prologue_size - code_size;
701 size_t i;
703 log_disas(s->code_gen_ptr, code_size);
705 for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
706 if (sizeof(tcg_target_ulong) == 8) {
707 qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
708 (uintptr_t)s->data_gen_ptr + i,
709 *(uint64_t *)(s->data_gen_ptr + i));
710 } else {
711 qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
712 (uintptr_t)s->data_gen_ptr + i,
713 *(uint32_t *)(s->data_gen_ptr + i));
716 } else {
717 log_disas(s->code_gen_ptr, prologue_size);
719 qemu_log("\n");
720 qemu_log_flush();
721 qemu_log_unlock(logfile);
723 #endif
725 /* Assert that goto_ptr is implemented completely. */
726 if (TCG_TARGET_HAS_goto_ptr) {
727 tcg_debug_assert(tcg_code_gen_epilogue != NULL);
731 void tcg_func_start(TCGContext *s)
733 tcg_pool_reset(s);
734 s->nb_temps = s->nb_globals;
736 /* No temps have been previously allocated for size or locality. */
737 memset(s->free_temps, 0, sizeof(s->free_temps));
739 /* No constant temps have been previously allocated. */
740 for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
741 if (s->const_table[i]) {
742 g_hash_table_remove_all(s->const_table[i]);
746 s->nb_ops = 0;
747 s->nb_labels = 0;
748 s->current_frame_offset = s->frame_start;
750 #ifdef CONFIG_DEBUG_TCG
751 s->goto_tb_issue_mask = 0;
752 #endif
754 QTAILQ_INIT(&s->ops);
755 QTAILQ_INIT(&s->free_ops);
756 QSIMPLEQ_INIT(&s->labels);
759 static TCGTemp *tcg_temp_alloc(TCGContext *s)
761 int n = s->nb_temps++;
763 if (n >= TCG_MAX_TEMPS) {
764 tcg_raise_tb_overflow(s);
766 return memset(&s->temps[n], 0, sizeof(TCGTemp));
769 static TCGTemp *tcg_global_alloc(TCGContext *s)
771 TCGTemp *ts;
773 tcg_debug_assert(s->nb_globals == s->nb_temps);
774 tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
775 s->nb_globals++;
776 ts = tcg_temp_alloc(s);
777 ts->kind = TEMP_GLOBAL;
779 return ts;
782 static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
783 TCGReg reg, const char *name)
785 TCGTemp *ts;
787 if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
788 tcg_abort();
791 ts = tcg_global_alloc(s);
792 ts->base_type = type;
793 ts->type = type;
794 ts->kind = TEMP_FIXED;
795 ts->reg = reg;
796 ts->name = name;
797 tcg_regset_set_reg(s->reserved_regs, reg);
799 return ts;
802 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
804 s->frame_start = start;
805 s->frame_end = start + size;
806 s->frame_temp
807 = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
810 TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
811 intptr_t offset, const char *name)
813 TCGContext *s = tcg_ctx;
814 TCGTemp *base_ts = tcgv_ptr_temp(base);
815 TCGTemp *ts = tcg_global_alloc(s);
816 int indirect_reg = 0, bigendian = 0;
817 #ifdef HOST_WORDS_BIGENDIAN
818 bigendian = 1;
819 #endif
821 switch (base_ts->kind) {
822 case TEMP_FIXED:
823 break;
824 case TEMP_GLOBAL:
825 /* We do not support double-indirect registers. */
826 tcg_debug_assert(!base_ts->indirect_reg);
827 base_ts->indirect_base = 1;
828 s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
829 ? 2 : 1);
830 indirect_reg = 1;
831 break;
832 default:
833 g_assert_not_reached();
836 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
837 TCGTemp *ts2 = tcg_global_alloc(s);
838 char buf[64];
840 ts->base_type = TCG_TYPE_I64;
841 ts->type = TCG_TYPE_I32;
842 ts->indirect_reg = indirect_reg;
843 ts->mem_allocated = 1;
844 ts->mem_base = base_ts;
845 ts->mem_offset = offset + bigendian * 4;
846 pstrcpy(buf, sizeof(buf), name);
847 pstrcat(buf, sizeof(buf), "_0");
848 ts->name = strdup(buf);
850 tcg_debug_assert(ts2 == ts + 1);
851 ts2->base_type = TCG_TYPE_I64;
852 ts2->type = TCG_TYPE_I32;
853 ts2->indirect_reg = indirect_reg;
854 ts2->mem_allocated = 1;
855 ts2->mem_base = base_ts;
856 ts2->mem_offset = offset + (1 - bigendian) * 4;
857 pstrcpy(buf, sizeof(buf), name);
858 pstrcat(buf, sizeof(buf), "_1");
859 ts2->name = strdup(buf);
860 } else {
861 ts->base_type = type;
862 ts->type = type;
863 ts->indirect_reg = indirect_reg;
864 ts->mem_allocated = 1;
865 ts->mem_base = base_ts;
866 ts->mem_offset = offset;
867 ts->name = name;
869 return ts;
872 TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
874 TCGContext *s = tcg_ctx;
875 TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
876 TCGTemp *ts;
877 int idx, k;
879 k = type + (temp_local ? TCG_TYPE_COUNT : 0);
880 idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
881 if (idx < TCG_MAX_TEMPS) {
882 /* There is already an available temp with the right type. */
883 clear_bit(idx, s->free_temps[k].l);
885 ts = &s->temps[idx];
886 ts->temp_allocated = 1;
887 tcg_debug_assert(ts->base_type == type);
888 tcg_debug_assert(ts->kind == kind);
889 } else {
890 ts = tcg_temp_alloc(s);
891 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
892 TCGTemp *ts2 = tcg_temp_alloc(s);
894 ts->base_type = type;
895 ts->type = TCG_TYPE_I32;
896 ts->temp_allocated = 1;
897 ts->kind = kind;
899 tcg_debug_assert(ts2 == ts + 1);
900 ts2->base_type = TCG_TYPE_I64;
901 ts2->type = TCG_TYPE_I32;
902 ts2->temp_allocated = 1;
903 ts2->kind = kind;
904 } else {
905 ts->base_type = type;
906 ts->type = type;
907 ts->temp_allocated = 1;
908 ts->kind = kind;
912 #if defined(CONFIG_DEBUG_TCG)
913 s->temps_in_use++;
914 #endif
915 return ts;
918 TCGv_vec tcg_temp_new_vec(TCGType type)
920 TCGTemp *t;
922 #ifdef CONFIG_DEBUG_TCG
923 switch (type) {
924 case TCG_TYPE_V64:
925 assert(TCG_TARGET_HAS_v64);
926 break;
927 case TCG_TYPE_V128:
928 assert(TCG_TARGET_HAS_v128);
929 break;
930 case TCG_TYPE_V256:
931 assert(TCG_TARGET_HAS_v256);
932 break;
933 default:
934 g_assert_not_reached();
936 #endif
938 t = tcg_temp_new_internal(type, 0);
939 return temp_tcgv_vec(t);
942 /* Create a new temp of the same type as an existing temp. */
943 TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
945 TCGTemp *t = tcgv_vec_temp(match);
947 tcg_debug_assert(t->temp_allocated != 0);
949 t = tcg_temp_new_internal(t->base_type, 0);
950 return temp_tcgv_vec(t);
953 void tcg_temp_free_internal(TCGTemp *ts)
955 TCGContext *s = tcg_ctx;
956 int k, idx;
958 /* In order to simplify users of tcg_constant_*, silently ignore free. */
959 if (ts->kind == TEMP_CONST) {
960 return;
963 #if defined(CONFIG_DEBUG_TCG)
964 s->temps_in_use--;
965 if (s->temps_in_use < 0) {
966 fprintf(stderr, "More temporaries freed than allocated!\n");
968 #endif
970 tcg_debug_assert(ts->kind < TEMP_GLOBAL);
971 tcg_debug_assert(ts->temp_allocated != 0);
972 ts->temp_allocated = 0;
974 idx = temp_idx(ts);
975 k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
976 set_bit(idx, s->free_temps[k].l);
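/*
 * Bookkeeping summary for the code above: freed temps are remembered in the
 * free_temps bitmaps, indexed by k = base_type for TEMP_NORMAL and
 * k = base_type + TCG_TYPE_COUNT for TEMP_LOCAL, exactly mirroring the
 * computation in tcg_temp_new_internal().  A later request for the same
 * type and lifetime can then reuse the temp found by find_first_bit()
 * instead of growing s->temps[].
 */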
979 TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
981 TCGContext *s = tcg_ctx;
982 GHashTable *h = s->const_table[type];
983 TCGTemp *ts;
985 if (h == NULL) {
986 h = g_hash_table_new(g_int64_hash, g_int64_equal);
987 s->const_table[type] = h;
990 ts = g_hash_table_lookup(h, &val);
991 if (ts == NULL) {
992 ts = tcg_temp_alloc(s);
994 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
995 TCGTemp *ts2 = tcg_temp_alloc(s);
997 ts->base_type = TCG_TYPE_I64;
998 ts->type = TCG_TYPE_I32;
999 ts->kind = TEMP_CONST;
1000 ts->temp_allocated = 1;
1002 * Retain the full value of the 64-bit constant in the low
1003 * part, so that the hash table works. Actual uses will
1004 * truncate the value to the low part.
1006 ts->val = val;
1008 tcg_debug_assert(ts2 == ts + 1);
1009 ts2->base_type = TCG_TYPE_I64;
1010 ts2->type = TCG_TYPE_I32;
1011 ts2->kind = TEMP_CONST;
1012 ts2->temp_allocated = 1;
1013 ts2->val = val >> 32;
1014 } else {
1015 ts->base_type = type;
1016 ts->type = type;
1017 ts->kind = TEMP_CONST;
1018 ts->temp_allocated = 1;
1019 ts->val = val;
1021 g_hash_table_insert(h, &ts->val, ts);
1024 return ts;
1027 TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
1029 val = dup_const(vece, val);
1030 return temp_tcgv_vec(tcg_constant_internal(type, val));
1033 TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
1035 TCGTemp *t = tcgv_vec_temp(match);
1037 tcg_debug_assert(t->temp_allocated != 0);
1038 return tcg_constant_vec(t->base_type, vece, val);
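/*
 * For illustration of the two constant APIs (the calls below are examples,
 * not taken from this file): tcg_constant_internal() interns each
 * (type, value) pair in a per-type hash table, yielding a TEMP_CONST temp
 * shared by all users and silently ignored by tcg_temp_free_internal()
 * above.  The legacy tcg_const_* helpers below instead allocate a fresh
 * temp and emit a movi, so their result is writable and must be freed by
 * the caller:
 *
 *     TCGv_i32 one = tcg_constant_i32(1);    shared, never freed
 *     TCGv_i32 tmp = tcg_const_i32(1);       private copy, free with
 *                                            tcg_temp_free_i32(tmp)
 */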
1041 TCGv_i32 tcg_const_i32(int32_t val)
1043 TCGv_i32 t0;
1044 t0 = tcg_temp_new_i32();
1045 tcg_gen_movi_i32(t0, val);
1046 return t0;
1049 TCGv_i64 tcg_const_i64(int64_t val)
1051 TCGv_i64 t0;
1052 t0 = tcg_temp_new_i64();
1053 tcg_gen_movi_i64(t0, val);
1054 return t0;
1057 TCGv_i32 tcg_const_local_i32(int32_t val)
1059 TCGv_i32 t0;
1060 t0 = tcg_temp_local_new_i32();
1061 tcg_gen_movi_i32(t0, val);
1062 return t0;
1065 TCGv_i64 tcg_const_local_i64(int64_t val)
1067 TCGv_i64 t0;
1068 t0 = tcg_temp_local_new_i64();
1069 tcg_gen_movi_i64(t0, val);
1070 return t0;
1073 #if defined(CONFIG_DEBUG_TCG)
1074 void tcg_clear_temp_count(void)
1076 TCGContext *s = tcg_ctx;
1077 s->temps_in_use = 0;
1080 int tcg_check_temp_count(void)
1082 TCGContext *s = tcg_ctx;
1083 if (s->temps_in_use) {
1084 /* Clear the count so that we don't give another
1085 * warning immediately next time around.
1087 s->temps_in_use = 0;
1088 return 1;
1090 return 0;
1092 #endif
1094 /* Return true if OP may appear in the opcode stream.
1095 Test the runtime variable that controls each opcode. */
1096 bool tcg_op_supported(TCGOpcode op)
1098 const bool have_vec
1099 = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;
1101 switch (op) {
1102 case INDEX_op_discard:
1103 case INDEX_op_set_label:
1104 case INDEX_op_call:
1105 case INDEX_op_br:
1106 case INDEX_op_mb:
1107 case INDEX_op_insn_start:
1108 case INDEX_op_exit_tb:
1109 case INDEX_op_goto_tb:
1110 case INDEX_op_qemu_ld_i32:
1111 case INDEX_op_qemu_st_i32:
1112 case INDEX_op_qemu_ld_i64:
1113 case INDEX_op_qemu_st_i64:
1114 return true;
1116 case INDEX_op_qemu_st8_i32:
1117 return TCG_TARGET_HAS_qemu_st8_i32;
1119 case INDEX_op_goto_ptr:
1120 return TCG_TARGET_HAS_goto_ptr;
1122 case INDEX_op_mov_i32:
1123 case INDEX_op_setcond_i32:
1124 case INDEX_op_brcond_i32:
1125 case INDEX_op_ld8u_i32:
1126 case INDEX_op_ld8s_i32:
1127 case INDEX_op_ld16u_i32:
1128 case INDEX_op_ld16s_i32:
1129 case INDEX_op_ld_i32:
1130 case INDEX_op_st8_i32:
1131 case INDEX_op_st16_i32:
1132 case INDEX_op_st_i32:
1133 case INDEX_op_add_i32:
1134 case INDEX_op_sub_i32:
1135 case INDEX_op_mul_i32:
1136 case INDEX_op_and_i32:
1137 case INDEX_op_or_i32:
1138 case INDEX_op_xor_i32:
1139 case INDEX_op_shl_i32:
1140 case INDEX_op_shr_i32:
1141 case INDEX_op_sar_i32:
1142 return true;
1144 case INDEX_op_movcond_i32:
1145 return TCG_TARGET_HAS_movcond_i32;
1146 case INDEX_op_div_i32:
1147 case INDEX_op_divu_i32:
1148 return TCG_TARGET_HAS_div_i32;
1149 case INDEX_op_rem_i32:
1150 case INDEX_op_remu_i32:
1151 return TCG_TARGET_HAS_rem_i32;
1152 case INDEX_op_div2_i32:
1153 case INDEX_op_divu2_i32:
1154 return TCG_TARGET_HAS_div2_i32;
1155 case INDEX_op_rotl_i32:
1156 case INDEX_op_rotr_i32:
1157 return TCG_TARGET_HAS_rot_i32;
1158 case INDEX_op_deposit_i32:
1159 return TCG_TARGET_HAS_deposit_i32;
1160 case INDEX_op_extract_i32:
1161 return TCG_TARGET_HAS_extract_i32;
1162 case INDEX_op_sextract_i32:
1163 return TCG_TARGET_HAS_sextract_i32;
1164 case INDEX_op_extract2_i32:
1165 return TCG_TARGET_HAS_extract2_i32;
1166 case INDEX_op_add2_i32:
1167 return TCG_TARGET_HAS_add2_i32;
1168 case INDEX_op_sub2_i32:
1169 return TCG_TARGET_HAS_sub2_i32;
1170 case INDEX_op_mulu2_i32:
1171 return TCG_TARGET_HAS_mulu2_i32;
1172 case INDEX_op_muls2_i32:
1173 return TCG_TARGET_HAS_muls2_i32;
1174 case INDEX_op_muluh_i32:
1175 return TCG_TARGET_HAS_muluh_i32;
1176 case INDEX_op_mulsh_i32:
1177 return TCG_TARGET_HAS_mulsh_i32;
1178 case INDEX_op_ext8s_i32:
1179 return TCG_TARGET_HAS_ext8s_i32;
1180 case INDEX_op_ext16s_i32:
1181 return TCG_TARGET_HAS_ext16s_i32;
1182 case INDEX_op_ext8u_i32:
1183 return TCG_TARGET_HAS_ext8u_i32;
1184 case INDEX_op_ext16u_i32:
1185 return TCG_TARGET_HAS_ext16u_i32;
1186 case INDEX_op_bswap16_i32:
1187 return TCG_TARGET_HAS_bswap16_i32;
1188 case INDEX_op_bswap32_i32:
1189 return TCG_TARGET_HAS_bswap32_i32;
1190 case INDEX_op_not_i32:
1191 return TCG_TARGET_HAS_not_i32;
1192 case INDEX_op_neg_i32:
1193 return TCG_TARGET_HAS_neg_i32;
1194 case INDEX_op_andc_i32:
1195 return TCG_TARGET_HAS_andc_i32;
1196 case INDEX_op_orc_i32:
1197 return TCG_TARGET_HAS_orc_i32;
1198 case INDEX_op_eqv_i32:
1199 return TCG_TARGET_HAS_eqv_i32;
1200 case INDEX_op_nand_i32:
1201 return TCG_TARGET_HAS_nand_i32;
1202 case INDEX_op_nor_i32:
1203 return TCG_TARGET_HAS_nor_i32;
1204 case INDEX_op_clz_i32:
1205 return TCG_TARGET_HAS_clz_i32;
1206 case INDEX_op_ctz_i32:
1207 return TCG_TARGET_HAS_ctz_i32;
1208 case INDEX_op_ctpop_i32:
1209 return TCG_TARGET_HAS_ctpop_i32;
1211 case INDEX_op_brcond2_i32:
1212 case INDEX_op_setcond2_i32:
1213 return TCG_TARGET_REG_BITS == 32;
1215 case INDEX_op_mov_i64:
1216 case INDEX_op_setcond_i64:
1217 case INDEX_op_brcond_i64:
1218 case INDEX_op_ld8u_i64:
1219 case INDEX_op_ld8s_i64:
1220 case INDEX_op_ld16u_i64:
1221 case INDEX_op_ld16s_i64:
1222 case INDEX_op_ld32u_i64:
1223 case INDEX_op_ld32s_i64:
1224 case INDEX_op_ld_i64:
1225 case INDEX_op_st8_i64:
1226 case INDEX_op_st16_i64:
1227 case INDEX_op_st32_i64:
1228 case INDEX_op_st_i64:
1229 case INDEX_op_add_i64:
1230 case INDEX_op_sub_i64:
1231 case INDEX_op_mul_i64:
1232 case INDEX_op_and_i64:
1233 case INDEX_op_or_i64:
1234 case INDEX_op_xor_i64:
1235 case INDEX_op_shl_i64:
1236 case INDEX_op_shr_i64:
1237 case INDEX_op_sar_i64:
1238 case INDEX_op_ext_i32_i64:
1239 case INDEX_op_extu_i32_i64:
1240 return TCG_TARGET_REG_BITS == 64;
1242 case INDEX_op_movcond_i64:
1243 return TCG_TARGET_HAS_movcond_i64;
1244 case INDEX_op_div_i64:
1245 case INDEX_op_divu_i64:
1246 return TCG_TARGET_HAS_div_i64;
1247 case INDEX_op_rem_i64:
1248 case INDEX_op_remu_i64:
1249 return TCG_TARGET_HAS_rem_i64;
1250 case INDEX_op_div2_i64:
1251 case INDEX_op_divu2_i64:
1252 return TCG_TARGET_HAS_div2_i64;
1253 case INDEX_op_rotl_i64:
1254 case INDEX_op_rotr_i64:
1255 return TCG_TARGET_HAS_rot_i64;
1256 case INDEX_op_deposit_i64:
1257 return TCG_TARGET_HAS_deposit_i64;
1258 case INDEX_op_extract_i64:
1259 return TCG_TARGET_HAS_extract_i64;
1260 case INDEX_op_sextract_i64:
1261 return TCG_TARGET_HAS_sextract_i64;
1262 case INDEX_op_extract2_i64:
1263 return TCG_TARGET_HAS_extract2_i64;
1264 case INDEX_op_extrl_i64_i32:
1265 return TCG_TARGET_HAS_extrl_i64_i32;
1266 case INDEX_op_extrh_i64_i32:
1267 return TCG_TARGET_HAS_extrh_i64_i32;
1268 case INDEX_op_ext8s_i64:
1269 return TCG_TARGET_HAS_ext8s_i64;
1270 case INDEX_op_ext16s_i64:
1271 return TCG_TARGET_HAS_ext16s_i64;
1272 case INDEX_op_ext32s_i64:
1273 return TCG_TARGET_HAS_ext32s_i64;
1274 case INDEX_op_ext8u_i64:
1275 return TCG_TARGET_HAS_ext8u_i64;
1276 case INDEX_op_ext16u_i64:
1277 return TCG_TARGET_HAS_ext16u_i64;
1278 case INDEX_op_ext32u_i64:
1279 return TCG_TARGET_HAS_ext32u_i64;
1280 case INDEX_op_bswap16_i64:
1281 return TCG_TARGET_HAS_bswap16_i64;
1282 case INDEX_op_bswap32_i64:
1283 return TCG_TARGET_HAS_bswap32_i64;
1284 case INDEX_op_bswap64_i64:
1285 return TCG_TARGET_HAS_bswap64_i64;
1286 case INDEX_op_not_i64:
1287 return TCG_TARGET_HAS_not_i64;
1288 case INDEX_op_neg_i64:
1289 return TCG_TARGET_HAS_neg_i64;
1290 case INDEX_op_andc_i64:
1291 return TCG_TARGET_HAS_andc_i64;
1292 case INDEX_op_orc_i64:
1293 return TCG_TARGET_HAS_orc_i64;
1294 case INDEX_op_eqv_i64:
1295 return TCG_TARGET_HAS_eqv_i64;
1296 case INDEX_op_nand_i64:
1297 return TCG_TARGET_HAS_nand_i64;
1298 case INDEX_op_nor_i64:
1299 return TCG_TARGET_HAS_nor_i64;
1300 case INDEX_op_clz_i64:
1301 return TCG_TARGET_HAS_clz_i64;
1302 case INDEX_op_ctz_i64:
1303 return TCG_TARGET_HAS_ctz_i64;
1304 case INDEX_op_ctpop_i64:
1305 return TCG_TARGET_HAS_ctpop_i64;
1306 case INDEX_op_add2_i64:
1307 return TCG_TARGET_HAS_add2_i64;
1308 case INDEX_op_sub2_i64:
1309 return TCG_TARGET_HAS_sub2_i64;
1310 case INDEX_op_mulu2_i64:
1311 return TCG_TARGET_HAS_mulu2_i64;
1312 case INDEX_op_muls2_i64:
1313 return TCG_TARGET_HAS_muls2_i64;
1314 case INDEX_op_muluh_i64:
1315 return TCG_TARGET_HAS_muluh_i64;
1316 case INDEX_op_mulsh_i64:
1317 return TCG_TARGET_HAS_mulsh_i64;
1319 case INDEX_op_mov_vec:
1320 case INDEX_op_dup_vec:
1321 case INDEX_op_dupm_vec:
1322 case INDEX_op_ld_vec:
1323 case INDEX_op_st_vec:
1324 case INDEX_op_add_vec:
1325 case INDEX_op_sub_vec:
1326 case INDEX_op_and_vec:
1327 case INDEX_op_or_vec:
1328 case INDEX_op_xor_vec:
1329 case INDEX_op_cmp_vec:
1330 return have_vec;
1331 case INDEX_op_dup2_vec:
1332 return have_vec && TCG_TARGET_REG_BITS == 32;
1333 case INDEX_op_not_vec:
1334 return have_vec && TCG_TARGET_HAS_not_vec;
1335 case INDEX_op_neg_vec:
1336 return have_vec && TCG_TARGET_HAS_neg_vec;
1337 case INDEX_op_abs_vec:
1338 return have_vec && TCG_TARGET_HAS_abs_vec;
1339 case INDEX_op_andc_vec:
1340 return have_vec && TCG_TARGET_HAS_andc_vec;
1341 case INDEX_op_orc_vec:
1342 return have_vec && TCG_TARGET_HAS_orc_vec;
1343 case INDEX_op_mul_vec:
1344 return have_vec && TCG_TARGET_HAS_mul_vec;
1345 case INDEX_op_shli_vec:
1346 case INDEX_op_shri_vec:
1347 case INDEX_op_sari_vec:
1348 return have_vec && TCG_TARGET_HAS_shi_vec;
1349 case INDEX_op_shls_vec:
1350 case INDEX_op_shrs_vec:
1351 case INDEX_op_sars_vec:
1352 return have_vec && TCG_TARGET_HAS_shs_vec;
1353 case INDEX_op_shlv_vec:
1354 case INDEX_op_shrv_vec:
1355 case INDEX_op_sarv_vec:
1356 return have_vec && TCG_TARGET_HAS_shv_vec;
1357 case INDEX_op_rotli_vec:
1358 return have_vec && TCG_TARGET_HAS_roti_vec;
1359 case INDEX_op_rotls_vec:
1360 return have_vec && TCG_TARGET_HAS_rots_vec;
1361 case INDEX_op_rotlv_vec:
1362 case INDEX_op_rotrv_vec:
1363 return have_vec && TCG_TARGET_HAS_rotv_vec;
1364 case INDEX_op_ssadd_vec:
1365 case INDEX_op_usadd_vec:
1366 case INDEX_op_sssub_vec:
1367 case INDEX_op_ussub_vec:
1368 return have_vec && TCG_TARGET_HAS_sat_vec;
1369 case INDEX_op_smin_vec:
1370 case INDEX_op_umin_vec:
1371 case INDEX_op_smax_vec:
1372 case INDEX_op_umax_vec:
1373 return have_vec && TCG_TARGET_HAS_minmax_vec;
1374 case INDEX_op_bitsel_vec:
1375 return have_vec && TCG_TARGET_HAS_bitsel_vec;
1376 case INDEX_op_cmpsel_vec:
1377 return have_vec && TCG_TARGET_HAS_cmpsel_vec;
1379 default:
1380 tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
1381 return true;
1385 /* Note: we convert the 64 bit args to 32 bit and do some alignment
1386 and endian swap. Maybe it would be better to do the alignment
1387 and endian swap in tcg_reg_alloc_call(). */
1388 void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
1390 int i, real_args, nb_rets, pi;
1391 unsigned typemask;
1392 const TCGHelperInfo *info;
1393 TCGOp *op;
1395 info = g_hash_table_lookup(helper_table, (gpointer)func);
1396 typemask = info->typemask;
1398 #ifdef CONFIG_PLUGIN
1399 /* detect non-plugin helpers */
1400 if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) {
1401 tcg_ctx->plugin_insn->calls_helpers = true;
1403 #endif
1405 #if defined(__sparc__) && !defined(__arch64__) \
1406 && !defined(CONFIG_TCG_INTERPRETER)
1407 /* We have 64-bit values in one register, but need to pass as two
1408 separate parameters. Split them. */
1409 int orig_typemask = typemask;
1410 int orig_nargs = nargs;
1411 TCGv_i64 retl, reth;
1412 TCGTemp *split_args[MAX_OPC_PARAM];
1414 retl = NULL;
1415 reth = NULL;
1416 typemask = 0;
1417 for (i = real_args = 0; i < nargs; ++i) {
1418 int argtype = extract32(orig_typemask, (i + 1) * 3, 3);
1419 bool is_64bit = (argtype & ~1) == dh_typecode_i64;
1421 if (is_64bit) {
1422 TCGv_i64 orig = temp_tcgv_i64(args[i]);
1423 TCGv_i32 h = tcg_temp_new_i32();
1424 TCGv_i32 l = tcg_temp_new_i32();
1425 tcg_gen_extr_i64_i32(l, h, orig);
1426 split_args[real_args++] = tcgv_i32_temp(h);
1427 typemask |= dh_typecode_i32 << (real_args * 3);
1428 split_args[real_args++] = tcgv_i32_temp(l);
1429 typemask |= dh_typecode_i32 << (real_args * 3);
1430 } else {
1431 split_args[real_args++] = args[i];
1432 typemask |= argtype << (real_args * 3);
1435 nargs = real_args;
1436 args = split_args;
1437 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
1438 for (i = 0; i < nargs; ++i) {
1439 int argtype = extract32(typemask, (i + 1) * 3, 3);
1440 bool is_32bit = (argtype & ~1) == dh_typecode_i32;
1441 bool is_signed = argtype & 1;
1443 if (is_32bit) {
1444 TCGv_i64 temp = tcg_temp_new_i64();
1445 TCGv_i64 orig = temp_tcgv_i64(args[i]);
1446 if (is_signed) {
1447 tcg_gen_ext32s_i64(temp, orig);
1448 } else {
1449 tcg_gen_ext32u_i64(temp, orig);
1451 args[i] = tcgv_i64_temp(temp);
1454 #endif /* TCG_TARGET_EXTEND_ARGS */
1456 op = tcg_emit_op(INDEX_op_call);
1458 pi = 0;
1459 if (ret != NULL) {
1460 #if defined(__sparc__) && !defined(__arch64__) \
1461 && !defined(CONFIG_TCG_INTERPRETER)
1462 if ((typemask & 6) == dh_typecode_i64) {
1463 /* The 32-bit ABI is going to return the 64-bit value in
1464 the %o0/%o1 register pair. Prepare for this by using
1465 two return temporaries, and reassemble below. */
1466 retl = tcg_temp_new_i64();
1467 reth = tcg_temp_new_i64();
1468 op->args[pi++] = tcgv_i64_arg(reth);
1469 op->args[pi++] = tcgv_i64_arg(retl);
1470 nb_rets = 2;
1471 } else {
1472 op->args[pi++] = temp_arg(ret);
1473 nb_rets = 1;
1475 #else
1476 if (TCG_TARGET_REG_BITS < 64 && (typemask & 6) == dh_typecode_i64) {
1477 #ifdef HOST_WORDS_BIGENDIAN
1478 op->args[pi++] = temp_arg(ret + 1);
1479 op->args[pi++] = temp_arg(ret);
1480 #else
1481 op->args[pi++] = temp_arg(ret);
1482 op->args[pi++] = temp_arg(ret + 1);
1483 #endif
1484 nb_rets = 2;
1485 } else {
1486 op->args[pi++] = temp_arg(ret);
1487 nb_rets = 1;
1489 #endif
1490 } else {
1491 nb_rets = 0;
1493 TCGOP_CALLO(op) = nb_rets;
1495 real_args = 0;
1496 for (i = 0; i < nargs; i++) {
1497 int argtype = extract32(typemask, (i + 1) * 3, 3);
1498 bool is_64bit = (argtype & ~1) == dh_typecode_i64;
1500 if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
1501 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1502 /* some targets want aligned 64 bit args */
1503 if (real_args & 1) {
1504 op->args[pi++] = TCG_CALL_DUMMY_ARG;
1505 real_args++;
1507 #endif
1508 /* If stack grows up, then we will be placing successive
1509 arguments at lower addresses, which means we need to
1510 reverse the order compared to how we would normally
1511 treat either big or little-endian. For those arguments
1512 that will wind up in registers, this still works for
1513 HPPA (the only current STACK_GROWSUP target) since the
1514 argument registers are *also* allocated in decreasing
1515 order. If another such target is added, this logic may
1516 have to get more complicated to differentiate between
1517 stack arguments and register arguments. */
1518 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
1519 op->args[pi++] = temp_arg(args[i] + 1);
1520 op->args[pi++] = temp_arg(args[i]);
1521 #else
1522 op->args[pi++] = temp_arg(args[i]);
1523 op->args[pi++] = temp_arg(args[i] + 1);
1524 #endif
1525 real_args += 2;
1526 continue;
1529 op->args[pi++] = temp_arg(args[i]);
1530 real_args++;
1532 op->args[pi++] = (uintptr_t)func;
1533 op->args[pi++] = (uintptr_t)info;
1534 TCGOP_CALLI(op) = real_args;
1536 /* Make sure the fields didn't overflow. */
1537 tcg_debug_assert(TCGOP_CALLI(op) == real_args);
1538 tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
1540 #if defined(__sparc__) && !defined(__arch64__) \
1541 && !defined(CONFIG_TCG_INTERPRETER)
1542 /* Free all of the parts we allocated above. */
1543 for (i = real_args = 0; i < orig_nargs; ++i) {
1544 int argtype = extract32(orig_typemask, (i + 1) * 3, 3);
1545 bool is_64bit = (argtype & ~1) == dh_typecode_i64;
1547 if (is_64bit) {
1548 tcg_temp_free_internal(args[real_args++]);
1549 tcg_temp_free_internal(args[real_args++]);
1550 } else {
1551 real_args++;
1554 if ((orig_typemask & 6) == dh_typecode_i64) {
1555 /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
1556 Note that describing these as TCGv_i64 eliminates an unnecessary
1557 zero-extension that tcg_gen_concat_i32_i64 would create. */
1558 tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
1559 tcg_temp_free_i64(retl);
1560 tcg_temp_free_i64(reth);
1562 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
1563 for (i = 0; i < nargs; ++i) {
1564 int argtype = extract32(typemask, (i + 1) * 3, 3);
1565 bool is_32bit = (argtype & ~1) == dh_typecode_i32;
1567 if (is_32bit) {
1568 tcg_temp_free_internal(args[i]);
1571 #endif /* TCG_TARGET_EXTEND_ARGS */
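/*
 * Note on the call op layout (matching the code above): besides the return
 * and argument temps, tcg_gen_callN() appends two constant arguments to the
 * INDEX_op_call op, the raw function pointer and the TCGHelperInfo pointer
 * looked up from helper_table.  Later passes therefore no longer need the
 * hash table; they read the metadata straight from the op, e.g.:
 *
 *     const TCGHelperInfo *info = tcg_call_info(op);
 *     if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
 *         ... the helper never returns ...
 *     }
 *
 * as done in tcg_dump_ops() and reachable_code_pass() below.
 */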
1574 static void tcg_reg_alloc_start(TCGContext *s)
1576 int i, n;
1578 for (i = 0, n = s->nb_temps; i < n; i++) {
1579 TCGTemp *ts = &s->temps[i];
1580 TCGTempVal val = TEMP_VAL_MEM;
1582 switch (ts->kind) {
1583 case TEMP_CONST:
1584 val = TEMP_VAL_CONST;
1585 break;
1586 case TEMP_FIXED:
1587 val = TEMP_VAL_REG;
1588 break;
1589 case TEMP_GLOBAL:
1590 break;
1591 case TEMP_NORMAL:
1592 val = TEMP_VAL_DEAD;
1593 /* fall through */
1594 case TEMP_LOCAL:
1595 ts->mem_allocated = 0;
1596 break;
1597 default:
1598 g_assert_not_reached();
1600 ts->val_type = val;
1603 memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
1606 static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
1607 TCGTemp *ts)
1609 int idx = temp_idx(ts);
1611 switch (ts->kind) {
1612 case TEMP_FIXED:
1613 case TEMP_GLOBAL:
1614 pstrcpy(buf, buf_size, ts->name);
1615 break;
1616 case TEMP_LOCAL:
1617 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
1618 break;
1619 case TEMP_NORMAL:
1620 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
1621 break;
1622 case TEMP_CONST:
1623 switch (ts->type) {
1624 case TCG_TYPE_I32:
1625 snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
1626 break;
1627 #if TCG_TARGET_REG_BITS > 32
1628 case TCG_TYPE_I64:
1629 snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
1630 break;
1631 #endif
1632 case TCG_TYPE_V64:
1633 case TCG_TYPE_V128:
1634 case TCG_TYPE_V256:
1635 snprintf(buf, buf_size, "v%d$0x%" PRIx64,
1636 64 << (ts->type - TCG_TYPE_V64), ts->val);
1637 break;
1638 default:
1639 g_assert_not_reached();
1641 break;
1643 return buf;
1646 static char *tcg_get_arg_str(TCGContext *s, char *buf,
1647 int buf_size, TCGArg arg)
1649 return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
1652 static const char * const cond_name[] =
1654 [TCG_COND_NEVER] = "never",
1655 [TCG_COND_ALWAYS] = "always",
1656 [TCG_COND_EQ] = "eq",
1657 [TCG_COND_NE] = "ne",
1658 [TCG_COND_LT] = "lt",
1659 [TCG_COND_GE] = "ge",
1660 [TCG_COND_LE] = "le",
1661 [TCG_COND_GT] = "gt",
1662 [TCG_COND_LTU] = "ltu",
1663 [TCG_COND_GEU] = "geu",
1664 [TCG_COND_LEU] = "leu",
1665 [TCG_COND_GTU] = "gtu"
1668 static const char * const ldst_name[] =
1670 [MO_UB] = "ub",
1671 [MO_SB] = "sb",
1672 [MO_LEUW] = "leuw",
1673 [MO_LESW] = "lesw",
1674 [MO_LEUL] = "leul",
1675 [MO_LESL] = "lesl",
1676 [MO_LEQ] = "leq",
1677 [MO_BEUW] = "beuw",
1678 [MO_BESW] = "besw",
1679 [MO_BEUL] = "beul",
1680 [MO_BESL] = "besl",
1681 [MO_BEQ] = "beq",
1684 static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
1685 #ifdef TARGET_ALIGNED_ONLY
1686 [MO_UNALN >> MO_ASHIFT] = "un+",
1687 [MO_ALIGN >> MO_ASHIFT] = "",
1688 #else
1689 [MO_UNALN >> MO_ASHIFT] = "",
1690 [MO_ALIGN >> MO_ASHIFT] = "al+",
1691 #endif
1692 [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
1693 [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
1694 [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
1695 [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
1696 [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
1697 [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
1700 static inline bool tcg_regset_single(TCGRegSet d)
1702 return (d & (d - 1)) == 0;
1705 static inline TCGReg tcg_regset_first(TCGRegSet d)
1707 if (TCG_TARGET_NB_REGS <= 32) {
1708 return ctz32(d);
1709 } else {
1710 return ctz64(d);
1714 static void tcg_dump_ops(TCGContext *s, bool have_prefs)
1716 char buf[128];
1717 TCGOp *op;
1719 QTAILQ_FOREACH(op, &s->ops, link) {
1720 int i, k, nb_oargs, nb_iargs, nb_cargs;
1721 const TCGOpDef *def;
1722 TCGOpcode c;
1723 int col = 0;
1725 c = op->opc;
1726 def = &tcg_op_defs[c];
1728 if (c == INDEX_op_insn_start) {
1729 nb_oargs = 0;
1730 col += qemu_log("\n ----");
1732 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1733 target_ulong a;
1734 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
1735 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
1736 #else
1737 a = op->args[i];
1738 #endif
1739 col += qemu_log(" " TARGET_FMT_lx, a);
1741 } else if (c == INDEX_op_call) {
1742 const TCGHelperInfo *info = tcg_call_info(op);
1743 void *func;
1745 /* variable number of arguments */
1746 nb_oargs = TCGOP_CALLO(op);
1747 nb_iargs = TCGOP_CALLI(op);
1748 nb_cargs = def->nb_cargs;
1750 col += qemu_log(" %s ", def->name);
1753 * Print the function name from TCGHelperInfo, if available.
1754 * Note that plugins have a template function for the info,
1755 * but the actual function pointer comes from the plugin.
1757 func = (void *)(uintptr_t)op->args[nb_oargs + nb_iargs];
1758 if (func == info->func) {
1759 col += qemu_log("%s", info->name);
1760 } else {
1761 col += qemu_log("plugin(%p)", func);
1764 col += qemu_log("$0x%x,$%d", info->flags, nb_oargs);
1765 for (i = 0; i < nb_oargs; i++) {
1766 col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
1767 op->args[i]));
1769 for (i = 0; i < nb_iargs; i++) {
1770 TCGArg arg = op->args[nb_oargs + i];
1771 const char *t = "<dummy>";
1772 if (arg != TCG_CALL_DUMMY_ARG) {
1773 t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
1775 col += qemu_log(",%s", t);
1777 } else {
1778 col += qemu_log(" %s ", def->name);
1780 nb_oargs = def->nb_oargs;
1781 nb_iargs = def->nb_iargs;
1782 nb_cargs = def->nb_cargs;
1784 if (def->flags & TCG_OPF_VECTOR) {
1785 col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
1786 8 << TCGOP_VECE(op));
1789 k = 0;
1790 for (i = 0; i < nb_oargs; i++) {
1791 if (k != 0) {
1792 col += qemu_log(",");
1794 col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
1795 op->args[k++]));
1797 for (i = 0; i < nb_iargs; i++) {
1798 if (k != 0) {
1799 col += qemu_log(",");
1801 col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
1802 op->args[k++]));
1804 switch (c) {
1805 case INDEX_op_brcond_i32:
1806 case INDEX_op_setcond_i32:
1807 case INDEX_op_movcond_i32:
1808 case INDEX_op_brcond2_i32:
1809 case INDEX_op_setcond2_i32:
1810 case INDEX_op_brcond_i64:
1811 case INDEX_op_setcond_i64:
1812 case INDEX_op_movcond_i64:
1813 case INDEX_op_cmp_vec:
1814 case INDEX_op_cmpsel_vec:
1815 if (op->args[k] < ARRAY_SIZE(cond_name)
1816 && cond_name[op->args[k]]) {
1817 col += qemu_log(",%s", cond_name[op->args[k++]]);
1818 } else {
1819 col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
1821 i = 1;
1822 break;
1823 case INDEX_op_qemu_ld_i32:
1824 case INDEX_op_qemu_st_i32:
1825 case INDEX_op_qemu_st8_i32:
1826 case INDEX_op_qemu_ld_i64:
1827 case INDEX_op_qemu_st_i64:
1829 TCGMemOpIdx oi = op->args[k++];
1830 MemOp op = get_memop(oi);
1831 unsigned ix = get_mmuidx(oi);
1833 if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
1834 col += qemu_log(",$0x%x,%u", op, ix);
1835 } else {
1836 const char *s_al, *s_op;
1837 s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
1838 s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
1839 col += qemu_log(",%s%s,%u", s_al, s_op, ix);
1841 i = 1;
1843 break;
1844 default:
1845 i = 0;
1846 break;
1848 switch (c) {
1849 case INDEX_op_set_label:
1850 case INDEX_op_br:
1851 case INDEX_op_brcond_i32:
1852 case INDEX_op_brcond_i64:
1853 case INDEX_op_brcond2_i32:
1854 col += qemu_log("%s$L%d", k ? "," : "",
1855 arg_label(op->args[k])->id);
1856 i++, k++;
1857 break;
1858 default:
1859 break;
1861 for (; i < nb_cargs; i++, k++) {
1862 col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
1866 if (have_prefs || op->life) {
1868 QemuLogFile *logfile;
1870 rcu_read_lock();
1871 logfile = qatomic_rcu_read(&qemu_logfile);
1872 if (logfile) {
1873 for (; col < 40; ++col) {
1874 putc(' ', logfile->fd);
1877 rcu_read_unlock();
1880 if (op->life) {
1881 unsigned life = op->life;
1883 if (life & (SYNC_ARG * 3)) {
1884 qemu_log(" sync:");
1885 for (i = 0; i < 2; ++i) {
1886 if (life & (SYNC_ARG << i)) {
1887 qemu_log(" %d", i);
1891 life /= DEAD_ARG;
1892 if (life) {
1893 qemu_log(" dead:");
1894 for (i = 0; life; ++i, life >>= 1) {
1895 if (life & 1) {
1896 qemu_log(" %d", i);
1902 if (have_prefs) {
1903 for (i = 0; i < nb_oargs; ++i) {
1904 TCGRegSet set = op->output_pref[i];
1906 if (i == 0) {
1907 qemu_log(" pref=");
1908 } else {
1909 qemu_log(",");
1911 if (set == 0) {
1912 qemu_log("none");
1913 } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
1914 qemu_log("all");
1915 #ifdef CONFIG_DEBUG_TCG
1916 } else if (tcg_regset_single(set)) {
1917 TCGReg reg = tcg_regset_first(set);
1918 qemu_log("%s", tcg_target_reg_names[reg]);
1919 #endif
1920 } else if (TCG_TARGET_NB_REGS <= 32) {
1921 qemu_log("%#x", (uint32_t)set);
1922 } else {
1923 qemu_log("%#" PRIx64, (uint64_t)set);
1928 qemu_log("\n");
1932 /* we give more priority to constraints with fewer registers */
1933 static int get_constraint_priority(const TCGOpDef *def, int k)
1935 const TCGArgConstraint *arg_ct = &def->args_ct[k];
1936 int n;
1938 if (arg_ct->oalias) {
1939 /* an alias is equivalent to a single register */
1940 n = 1;
1941 } else {
1942 n = ctpop64(arg_ct->regs);
1944 return TCG_TARGET_NB_REGS - n + 1;
1947 /* sort from highest priority to lowest */
1948 static void sort_constraints(TCGOpDef *def, int start, int n)
1950 int i, j;
1951 TCGArgConstraint *a = def->args_ct;
1953 for (i = 0; i < n; i++) {
1954 a[start + i].sort_index = start + i;
1956 if (n <= 1) {
1957 return;
1959 for (i = 0; i < n - 1; i++) {
1960 for (j = i + 1; j < n; j++) {
1961 int p1 = get_constraint_priority(def, a[start + i].sort_index);
1962 int p2 = get_constraint_priority(def, a[start + j].sort_index);
1963 if (p1 < p2) {
1964 int tmp = a[start + i].sort_index;
1965 a[start + i].sort_index = a[start + j].sort_index;
1966 a[start + j].sort_index = tmp;
1972 static void process_op_defs(TCGContext *s)
1974 TCGOpcode op;
1976 for (op = 0; op < NB_OPS; op++) {
1977 TCGOpDef *def = &tcg_op_defs[op];
1978 const TCGTargetOpDef *tdefs;
1979 int i, nb_args;
1981 if (def->flags & TCG_OPF_NOT_PRESENT) {
1982 continue;
1985 nb_args = def->nb_iargs + def->nb_oargs;
1986 if (nb_args == 0) {
1987 continue;
1991 * Macro magic should make it impossible, but double-check that
1992      * the array index is in range. Since the signedness of an enum
1993 * is implementation defined, force the result to unsigned.
1995 unsigned con_set = tcg_target_op_def(op);
1996 tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
1997 tdefs = &constraint_sets[con_set];
1999 for (i = 0; i < nb_args; i++) {
2000 const char *ct_str = tdefs->args_ct_str[i];
2001 /* Incomplete TCGTargetOpDef entry. */
2002 tcg_debug_assert(ct_str != NULL);
2004 while (*ct_str != '\0') {
2005 switch(*ct_str) {
2006 case '0' ... '9':
2008 int oarg = *ct_str - '0';
2009 tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
2010 tcg_debug_assert(oarg < def->nb_oargs);
2011 tcg_debug_assert(def->args_ct[oarg].regs != 0);
2012 def->args_ct[i] = def->args_ct[oarg];
2013 /* The output sets oalias. */
2014 def->args_ct[oarg].oalias = true;
2015 def->args_ct[oarg].alias_index = i;
2016 /* The input sets ialias. */
2017 def->args_ct[i].ialias = true;
2018 def->args_ct[i].alias_index = oarg;
2020 ct_str++;
2021 break;
2022 case '&':
2023 def->args_ct[i].newreg = true;
2024 ct_str++;
2025 break;
2026 case 'i':
2027 def->args_ct[i].ct |= TCG_CT_CONST;
2028 ct_str++;
2029 break;
2031 /* Include all of the target-specific constraints. */
2033 #undef CONST
2034 #define CONST(CASE, MASK) \
2035 case CASE: def->args_ct[i].ct |= MASK; ct_str++; break;
2036 #define REGS(CASE, MASK) \
2037 case CASE: def->args_ct[i].regs |= MASK; ct_str++; break;
2039 #include "tcg-target-con-str.h"
2041 #undef REGS
2042 #undef CONST
2043 default:
2044 /* Typo in TCGTargetOpDef constraint. */
2045 g_assert_not_reached();
2050 /* TCGTargetOpDef entry with too much information? */
2051 tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
2053         /* sort the constraints (XXX: this is just a heuristic) */
2054 sort_constraints(def, 0, def->nb_oargs);
2055 sort_constraints(def, def->nb_oargs, def->nb_iargs);
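/*
 * Worked example (the constraint set is hypothetical): for an opcode whose
 * tcg_target_op_def() returns C_O1_I2(r, 0, ri), the strings parsed above
 * are { "r", "0", "ri" }.  Argument 0 (the output) gets register class 'r';
 * argument 1's "0" makes it an alias of output 0, setting oalias on the
 * output and ialias on the input; argument 2's "ri" accepts either an 'r'
 * class register or, via TCG_CT_CONST, an immediate.  The target-specific
 * letters themselves come from tcg-target-con-str.h through the REGS/CONST
 * macros.
 */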
2059 void tcg_op_remove(TCGContext *s, TCGOp *op)
2061 TCGLabel *label;
2063 switch (op->opc) {
2064 case INDEX_op_br:
2065 label = arg_label(op->args[0]);
2066 label->refs--;
2067 break;
2068 case INDEX_op_brcond_i32:
2069 case INDEX_op_brcond_i64:
2070 label = arg_label(op->args[3]);
2071 label->refs--;
2072 break;
2073 case INDEX_op_brcond2_i32:
2074 label = arg_label(op->args[5]);
2075 label->refs--;
2076 break;
2077 default:
2078 break;
2081 QTAILQ_REMOVE(&s->ops, op, link);
2082 QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
2083 s->nb_ops--;
2085 #ifdef CONFIG_PROFILER
2086 qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
2087 #endif
2090 void tcg_remove_ops_after(TCGOp *op)
2092 TCGContext *s = tcg_ctx;
2094 while (true) {
2095 TCGOp *last = tcg_last_op();
2096 if (last == op) {
2097 return;
2099 tcg_op_remove(s, last);
2103 static TCGOp *tcg_op_alloc(TCGOpcode opc)
2105 TCGContext *s = tcg_ctx;
2106 TCGOp *op;
2108 if (likely(QTAILQ_EMPTY(&s->free_ops))) {
2109 op = tcg_malloc(sizeof(TCGOp));
2110 } else {
2111 op = QTAILQ_FIRST(&s->free_ops);
2112 QTAILQ_REMOVE(&s->free_ops, op, link);
2114 memset(op, 0, offsetof(TCGOp, link));
2115 op->opc = opc;
2116 s->nb_ops++;
2118 return op;
2121 TCGOp *tcg_emit_op(TCGOpcode opc)
2123 TCGOp *op = tcg_op_alloc(opc);
2124 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2125 return op;
2128 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
2130 TCGOp *new_op = tcg_op_alloc(opc);
2131 QTAILQ_INSERT_BEFORE(old_op, new_op, link);
2132 return new_op;
2135 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
2137 TCGOp *new_op = tcg_op_alloc(opc);
2138 QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
2139 return new_op;
2142 /* Reachable analysis : remove unreachable code. */
2143 static void reachable_code_pass(TCGContext *s)
2145 TCGOp *op, *op_next;
2146 bool dead = false;
2148 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2149 bool remove = dead;
2150 TCGLabel *label;
2152 switch (op->opc) {
2153 case INDEX_op_set_label:
2154 label = arg_label(op->args[0]);
2155 if (label->refs == 0) {
2157 * While there is an occasional backward branch, virtually
2158 * all branches generated by the translators are forward.
2159 * Which means that generally we will have already removed
2160 * all references to the label that will ever be made, and
2161 * there is little to be gained by iterating.
2163 remove = true;
2164 } else {
2165 /* Once we see a label, insns become live again. */
2166 dead = false;
2167 remove = false;
2170 * Optimization can fold conditional branches to unconditional.
2171 * If we find a label with one reference which is preceded by
2172 * an unconditional branch to it, remove both. This needed to
2173 * wait until the dead code in between them was removed.
2175 if (label->refs == 1) {
2176 TCGOp *op_prev = QTAILQ_PREV(op, link);
2177 if (op_prev->opc == INDEX_op_br &&
2178 label == arg_label(op_prev->args[0])) {
2179 tcg_op_remove(s, op_prev);
2180 remove = true;
2184 break;
2186 case INDEX_op_br:
2187 case INDEX_op_exit_tb:
2188 case INDEX_op_goto_ptr:
2189 /* Unconditional branches; everything following is dead. */
2190 dead = true;
2191 break;
2193 case INDEX_op_call:
2194 /* Notice noreturn helper calls, raising exceptions. */
2195 if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
2196 dead = true;
2198 break;
2200 case INDEX_op_insn_start:
2201 /* Never remove -- we need to keep these for unwind. */
2202 remove = false;
2203 break;
2205 default:
2206 break;
2209 if (remove) {
2210 tcg_op_remove(s, op);
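/*
 * The scan above, reduced to its core on a flat opcode list: code
 * between an unconditional branch and the next label that still has
 * references is unreachable.  The ex_* names are hypothetical stand-ins
 * for the real op list and label bookkeeping, and the special cases
 * (noreturn calls, insn_start, branch-to-next-label folding) are left out.
 */
typedef enum { EX_OP_OTHER, EX_OP_BRANCH, EX_OP_LABEL } ex_opkind;

static void ex_mark_dead(const ex_opkind *ops, int n, bool *dead)
{
    bool in_dead_code = false;

    for (int i = 0; i < n; i++) {
        if (ops[i] == EX_OP_LABEL) {
            in_dead_code = false;            /* insns become live again */
        }
        dead[i] = in_dead_code;
        if (ops[i] == EX_OP_BRANCH) {
            in_dead_code = true;             /* everything following is dead */
        }
    }
}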
2215 #define TS_DEAD 1
2216 #define TS_MEM 2
2218 #define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
2219 #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
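/*
 * op->life packs one "dead" and one "sync" bit per operand; the two
 * macros above test the bit for operand n.  A self-contained model of
 * the same packing, with hypothetical base values (the real DEAD_ARG
 * and SYNC_ARG definitions live elsewhere in the TCG headers):
 */
typedef unsigned ex_lifedata;                 /* stands in for TCGLifeData */
#define EX_SYNC_ARG   0x0001u                 /* hypothetical base values */
#define EX_DEAD_ARG   0x0100u
#define EX_IS_DEAD(life, n)    ((life) & (EX_DEAD_ARG << (n)))
#define EX_NEED_SYNC(life, n)  ((life) & (EX_SYNC_ARG << (n)))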
2221 /* For liveness_pass_1, the register preferences for a given temp. */
2222 static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
2224 return ts->state_ptr;
2227 /* For liveness_pass_1, reset the preferences for a given temp to the
2228 * maximal regset for its type.
2230 static inline void la_reset_pref(TCGTemp *ts)
2232 *la_temp_pref(ts)
2233 = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
2236 /* liveness analysis: end of function: all temps are dead, and globals
2237 should be in memory. */
2238 static void la_func_end(TCGContext *s, int ng, int nt)
2240 int i;
2242 for (i = 0; i < ng; ++i) {
2243 s->temps[i].state = TS_DEAD | TS_MEM;
2244 la_reset_pref(&s->temps[i]);
2246 for (i = ng; i < nt; ++i) {
2247 s->temps[i].state = TS_DEAD;
2248 la_reset_pref(&s->temps[i]);
2252 /* liveness analysis: end of basic block: all temps are dead, globals
2253 and local temps should be in memory. */
2254 static void la_bb_end(TCGContext *s, int ng, int nt)
2256 int i;
2258 for (i = 0; i < nt; ++i) {
2259 TCGTemp *ts = &s->temps[i];
2260 int state;
2262 switch (ts->kind) {
2263 case TEMP_FIXED:
2264 case TEMP_GLOBAL:
2265 case TEMP_LOCAL:
2266 state = TS_DEAD | TS_MEM;
2267 break;
2268 case TEMP_NORMAL:
2269 case TEMP_CONST:
2270 state = TS_DEAD;
2271 break;
2272 default:
2273 g_assert_not_reached();
2275 ts->state = state;
2276 la_reset_pref(ts);
2280 /* liveness analysis: sync globals back to memory. */
2281 static void la_global_sync(TCGContext *s, int ng)
2283 int i;
2285 for (i = 0; i < ng; ++i) {
2286 int state = s->temps[i].state;
2287 s->temps[i].state = state | TS_MEM;
2288 if (state == TS_DEAD) {
2289 /* If the global was previously dead, reset prefs. */
2290 la_reset_pref(&s->temps[i]);
2296 * liveness analysis: conditional branch: all temps are dead,
2297 * globals and local temps should be synced.
2299 static void la_bb_sync(TCGContext *s, int ng, int nt)
2301 la_global_sync(s, ng);
2303 for (int i = ng; i < nt; ++i) {
2304 TCGTemp *ts = &s->temps[i];
2305 int state;
2307 switch (ts->kind) {
2308 case TEMP_LOCAL:
2309 state = ts->state;
2310 ts->state = state | TS_MEM;
2311 if (state != TS_DEAD) {
2312 continue;
2314 break;
2315 case TEMP_NORMAL:
2316 s->temps[i].state = TS_DEAD;
2317 break;
2318 case TEMP_CONST:
2319 continue;
2320 default:
2321 g_assert_not_reached();
2323 la_reset_pref(&s->temps[i]);
2327 /* liveness analysis: sync globals back to memory and kill. */
2328 static void la_global_kill(TCGContext *s, int ng)
2330 int i;
2332 for (i = 0; i < ng; i++) {
2333 s->temps[i].state = TS_DEAD | TS_MEM;
2334 la_reset_pref(&s->temps[i]);
2338 /* liveness analysis: note live globals crossing calls. */
2339 static void la_cross_call(TCGContext *s, int nt)
2341 TCGRegSet mask = ~tcg_target_call_clobber_regs;
2342 int i;
2344 for (i = 0; i < nt; i++) {
2345 TCGTemp *ts = &s->temps[i];
2346 if (!(ts->state & TS_DEAD)) {
2347 TCGRegSet *pset = la_temp_pref(ts);
2348 TCGRegSet set = *pset;
2350 set &= mask;
2351 /* If the combination is not possible, restart. */
2352 if (set == 0) {
2353 set = tcg_target_available_regs[ts->type] & mask;
2355 *pset = set;
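/*
 * The "narrow the preference, but never to the empty set" pattern above
 * also appears later in this pass when operand constraints are folded
 * into preferences.  A standalone form of it (ex_narrow_prefs is a
 * hypothetical helper, not TCG API):
 */
static unsigned ex_narrow_prefs(unsigned prefs, unsigned keep_mask,
                                unsigned all_usable)
{
    unsigned set = prefs & keep_mask;
    return set != 0 ? set : (all_usable & keep_mask);
}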
2360 /* Liveness analysis: update the opc_arg_life array to tell if a
2361 given input argument is dead. Instructions updating dead
2362 temporaries are removed. */
2363 static void liveness_pass_1(TCGContext *s)
2365 int nb_globals = s->nb_globals;
2366 int nb_temps = s->nb_temps;
2367 TCGOp *op, *op_prev;
2368 TCGRegSet *prefs;
2369 int i;
2371 prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
2372 for (i = 0; i < nb_temps; ++i) {
2373 s->temps[i].state_ptr = prefs + i;
2376 /* ??? Should be redundant with the exit_tb that ends the TB. */
2377 la_func_end(s, nb_globals, nb_temps);
2379 QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
2380 int nb_iargs, nb_oargs;
2381 TCGOpcode opc_new, opc_new2;
2382 bool have_opc_new2;
2383 TCGLifeData arg_life = 0;
2384 TCGTemp *ts;
2385 TCGOpcode opc = op->opc;
2386 const TCGOpDef *def = &tcg_op_defs[opc];
2388 switch (opc) {
2389 case INDEX_op_call:
2391 int call_flags;
2392 int nb_call_regs;
2394 nb_oargs = TCGOP_CALLO(op);
2395 nb_iargs = TCGOP_CALLI(op);
2396 call_flags = tcg_call_flags(op);
2398 /* pure functions can be removed if their result is unused */
2399 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
2400 for (i = 0; i < nb_oargs; i++) {
2401 ts = arg_temp(op->args[i]);
2402 if (ts->state != TS_DEAD) {
2403 goto do_not_remove_call;
2406 goto do_remove;
2408 do_not_remove_call:
2410 /* Output args are dead. */
2411 for (i = 0; i < nb_oargs; i++) {
2412 ts = arg_temp(op->args[i]);
2413 if (ts->state & TS_DEAD) {
2414 arg_life |= DEAD_ARG << i;
2416 if (ts->state & TS_MEM) {
2417 arg_life |= SYNC_ARG << i;
2419 ts->state = TS_DEAD;
2420 la_reset_pref(ts);
2422 /* Not used -- it will be tcg_target_call_oarg_regs[i]. */
2423 op->output_pref[i] = 0;
2426 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
2427 TCG_CALL_NO_READ_GLOBALS))) {
2428 la_global_kill(s, nb_globals);
2429 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
2430 la_global_sync(s, nb_globals);
2433 /* Record arguments that die in this helper. */
2434 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2435 ts = arg_temp(op->args[i]);
2436 if (ts && ts->state & TS_DEAD) {
2437 arg_life |= DEAD_ARG << i;
2441 /* For all live registers, remove call-clobbered prefs. */
2442 la_cross_call(s, nb_temps);
2444 nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2446 /* Input arguments are live for preceding opcodes. */
2447 for (i = 0; i < nb_iargs; i++) {
2448 ts = arg_temp(op->args[i + nb_oargs]);
2449 if (ts && ts->state & TS_DEAD) {
2450 /* For those arguments that die, and will be allocated
2451 * in registers, clear the register set for that arg,
2452 * to be filled in below. For args that will be on
2453 * the stack, reset to any available reg.
2455 *la_temp_pref(ts)
2456 = (i < nb_call_regs ? 0 :
2457 tcg_target_available_regs[ts->type]);
2458 ts->state &= ~TS_DEAD;
2462 /* For each input argument, add its input register to prefs.
2463 If a temp is used once, this produces a single set bit. */
2464 for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) {
2465 ts = arg_temp(op->args[i + nb_oargs]);
2466 if (ts) {
2467 tcg_regset_set_reg(*la_temp_pref(ts),
2468 tcg_target_call_iarg_regs[i]);
2472 break;
2473 case INDEX_op_insn_start:
2474 break;
2475 case INDEX_op_discard:
2476 /* mark the temporary as dead */
2477 ts = arg_temp(op->args[0]);
2478 ts->state = TS_DEAD;
2479 la_reset_pref(ts);
2480 break;
2482 case INDEX_op_add2_i32:
2483 opc_new = INDEX_op_add_i32;
2484 goto do_addsub2;
2485 case INDEX_op_sub2_i32:
2486 opc_new = INDEX_op_sub_i32;
2487 goto do_addsub2;
2488 case INDEX_op_add2_i64:
2489 opc_new = INDEX_op_add_i64;
2490 goto do_addsub2;
2491 case INDEX_op_sub2_i64:
2492 opc_new = INDEX_op_sub_i64;
2493 do_addsub2:
2494 nb_iargs = 4;
2495 nb_oargs = 2;
2496 /* Test if the high part of the operation is dead, but not
2497 the low part. The result can be optimized to a simple
2498 add or sub. This happens often for an x86_64 guest when the
2499 cpu mode is set to 32 bit. */
2500 if (arg_temp(op->args[1])->state == TS_DEAD) {
2501 if (arg_temp(op->args[0])->state == TS_DEAD) {
2502 goto do_remove;
2504 /* Replace the opcode and adjust the args in place,
2505 leaving 3 unused args at the end. */
2506 op->opc = opc = opc_new;
2507 op->args[1] = op->args[2];
2508 op->args[2] = op->args[4];
2509 /* Fall through and mark the single-word operation live. */
2510 nb_iargs = 2;
2511 nb_oargs = 1;
2513 goto do_not_remove;
2515 case INDEX_op_mulu2_i32:
2516 opc_new = INDEX_op_mul_i32;
2517 opc_new2 = INDEX_op_muluh_i32;
2518 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
2519 goto do_mul2;
2520 case INDEX_op_muls2_i32:
2521 opc_new = INDEX_op_mul_i32;
2522 opc_new2 = INDEX_op_mulsh_i32;
2523 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
2524 goto do_mul2;
2525 case INDEX_op_mulu2_i64:
2526 opc_new = INDEX_op_mul_i64;
2527 opc_new2 = INDEX_op_muluh_i64;
2528 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
2529 goto do_mul2;
2530 case INDEX_op_muls2_i64:
2531 opc_new = INDEX_op_mul_i64;
2532 opc_new2 = INDEX_op_mulsh_i64;
2533 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
2534 goto do_mul2;
2535 do_mul2:
2536 nb_iargs = 2;
2537 nb_oargs = 2;
2538 if (arg_temp(op->args[1])->state == TS_DEAD) {
2539 if (arg_temp(op->args[0])->state == TS_DEAD) {
2540 /* Both parts of the operation are dead. */
2541 goto do_remove;
2543 /* The high part of the operation is dead; generate the low. */
2544 op->opc = opc = opc_new;
2545 op->args[1] = op->args[2];
2546 op->args[2] = op->args[3];
2547 } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
2548 /* The low part of the operation is dead; generate the high. */
2549 op->opc = opc = opc_new2;
2550 op->args[0] = op->args[1];
2551 op->args[1] = op->args[2];
2552 op->args[2] = op->args[3];
2553 } else {
2554 goto do_not_remove;
2556 /* Mark the single-word operation live. */
2557 nb_oargs = 1;
2558 goto do_not_remove;
2560 default:
2561 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
2562 nb_iargs = def->nb_iargs;
2563 nb_oargs = def->nb_oargs;
2565 /* Test if the operation can be removed because all
2566 its outputs are dead. We assume that nb_oargs == 0
2567 implies side effects */
2568 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
2569 for (i = 0; i < nb_oargs; i++) {
2570 if (arg_temp(op->args[i])->state != TS_DEAD) {
2571 goto do_not_remove;
2574 goto do_remove;
2576 goto do_not_remove;
2578 do_remove:
2579 tcg_op_remove(s, op);
2580 break;
2582 do_not_remove:
2583 for (i = 0; i < nb_oargs; i++) {
2584 ts = arg_temp(op->args[i]);
2586 /* Remember the preference of the uses that followed. */
2587 op->output_pref[i] = *la_temp_pref(ts);
2589 /* Output args are dead. */
2590 if (ts->state & TS_DEAD) {
2591 arg_life |= DEAD_ARG << i;
2593 if (ts->state & TS_MEM) {
2594 arg_life |= SYNC_ARG << i;
2596 ts->state = TS_DEAD;
2597 la_reset_pref(ts);
2600 /* If end of basic block, update. */
2601 if (def->flags & TCG_OPF_BB_EXIT) {
2602 la_func_end(s, nb_globals, nb_temps);
2603 } else if (def->flags & TCG_OPF_COND_BRANCH) {
2604 la_bb_sync(s, nb_globals, nb_temps);
2605 } else if (def->flags & TCG_OPF_BB_END) {
2606 la_bb_end(s, nb_globals, nb_temps);
2607 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2608 la_global_sync(s, nb_globals);
2609 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2610 la_cross_call(s, nb_temps);
2614 /* Record arguments that die in this opcode. */
2615 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2616 ts = arg_temp(op->args[i]);
2617 if (ts->state & TS_DEAD) {
2618 arg_life |= DEAD_ARG << i;
2622 /* Input arguments are live for preceding opcodes. */
2623 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2624 ts = arg_temp(op->args[i]);
2625 if (ts->state & TS_DEAD) {
2626 /* For operands that were dead, initially allow
2627 all regs for the type. */
2628 *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
2629 ts->state &= ~TS_DEAD;
2633 /* Incorporate constraints for this operand. */
2634 switch (opc) {
2635 case INDEX_op_mov_i32:
2636 case INDEX_op_mov_i64:
2637 /* Note that these are TCG_OPF_NOT_PRESENT and do not
2638 have proper constraints. That said, special case
2639 moves to propagate preferences backward. */
2640 if (IS_DEAD_ARG(1)) {
2641 *la_temp_pref(arg_temp(op->args[0]))
2642 = *la_temp_pref(arg_temp(op->args[1]));
2644 break;
2646 default:
2647 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2648 const TCGArgConstraint *ct = &def->args_ct[i];
2649 TCGRegSet set, *pset;
2651 ts = arg_temp(op->args[i]);
2652 pset = la_temp_pref(ts);
2653 set = *pset;
2655 set &= ct->regs;
2656 if (ct->ialias) {
2657 set &= op->output_pref[ct->alias_index];
2659 /* If the combination is not possible, restart. */
2660 if (set == 0) {
2661 set = ct->regs;
2663 *pset = set;
2665 break;
2667 break;
2669 op->life = arg_life;
2673 /* Liveness analysis: Convert indirect regs to direct temporaries. */
2674 static bool liveness_pass_2(TCGContext *s)
2676 int nb_globals = s->nb_globals;
2677 int nb_temps, i;
2678 bool changes = false;
2679 TCGOp *op, *op_next;
2681 /* Create a temporary for each indirect global. */
2682 for (i = 0; i < nb_globals; ++i) {
2683 TCGTemp *its = &s->temps[i];
2684 if (its->indirect_reg) {
2685 TCGTemp *dts = tcg_temp_alloc(s);
2686 dts->type = its->type;
2687 dts->base_type = its->base_type;
2688 its->state_ptr = dts;
2689 } else {
2690 its->state_ptr = NULL;
2692 /* All globals begin dead. */
2693 its->state = TS_DEAD;
2695 for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
2696 TCGTemp *its = &s->temps[i];
2697 its->state_ptr = NULL;
2698 its->state = TS_DEAD;
2701 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2702 TCGOpcode opc = op->opc;
2703 const TCGOpDef *def = &tcg_op_defs[opc];
2704 TCGLifeData arg_life = op->life;
2705 int nb_iargs, nb_oargs, call_flags;
2706 TCGTemp *arg_ts, *dir_ts;
2708 if (opc == INDEX_op_call) {
2709 nb_oargs = TCGOP_CALLO(op);
2710 nb_iargs = TCGOP_CALLI(op);
2711 call_flags = tcg_call_flags(op);
2712 } else {
2713 nb_iargs = def->nb_iargs;
2714 nb_oargs = def->nb_oargs;
2716 /* Set flags similar to how calls require. */
2717 if (def->flags & TCG_OPF_COND_BRANCH) {
2718 /* Like reading globals: sync_globals */
2719 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
2720 } else if (def->flags & TCG_OPF_BB_END) {
2721 /* Like writing globals: save_globals */
2722 call_flags = 0;
2723 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2724 /* Like reading globals: sync_globals */
2725 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
2726 } else {
2727 /* No effect on globals. */
2728 call_flags = (TCG_CALL_NO_READ_GLOBALS |
2729 TCG_CALL_NO_WRITE_GLOBALS);
2733 /* Make sure that input arguments are available. */
2734 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2735 arg_ts = arg_temp(op->args[i]);
2736 if (arg_ts) {
2737 dir_ts = arg_ts->state_ptr;
2738 if (dir_ts && arg_ts->state == TS_DEAD) {
2739 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
2740 ? INDEX_op_ld_i32
2741 : INDEX_op_ld_i64);
2742 TCGOp *lop = tcg_op_insert_before(s, op, lopc);
2744 lop->args[0] = temp_arg(dir_ts);
2745 lop->args[1] = temp_arg(arg_ts->mem_base);
2746 lop->args[2] = arg_ts->mem_offset;
2748 /* Loaded, but synced with memory. */
2749 arg_ts->state = TS_MEM;
2754 /* Perform input replacement, and mark inputs that became dead.
2755 No action is required except keeping temp_state up to date
2756 so that we reload when needed. */
2757 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2758 arg_ts = arg_temp(op->args[i]);
2759 if (arg_ts) {
2760 dir_ts = arg_ts->state_ptr;
2761 if (dir_ts) {
2762 op->args[i] = temp_arg(dir_ts);
2763 changes = true;
2764 if (IS_DEAD_ARG(i)) {
2765 arg_ts->state = TS_DEAD;
2771 /* Liveness analysis should ensure that the following are
2772 all correct, for call sites and basic block end points. */
2773 if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
2774 /* Nothing to do */
2775 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
2776 for (i = 0; i < nb_globals; ++i) {
2777 /* Liveness should see that globals are synced back,
2778 that is, either TS_DEAD or TS_MEM. */
2779 arg_ts = &s->temps[i];
2780 tcg_debug_assert(arg_ts->state_ptr == 0
2781 || arg_ts->state != 0);
2783 } else {
2784 for (i = 0; i < nb_globals; ++i) {
2785 /* Liveness should see that globals are saved back,
2786 that is, TS_DEAD, waiting to be reloaded. */
2787 arg_ts = &s->temps[i];
2788 tcg_debug_assert(arg_ts->state_ptr == 0
2789 || arg_ts->state == TS_DEAD);
2793 /* Outputs become available. */
2794 if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
2795 arg_ts = arg_temp(op->args[0]);
2796 dir_ts = arg_ts->state_ptr;
2797 if (dir_ts) {
2798 op->args[0] = temp_arg(dir_ts);
2799 changes = true;
2801 /* The output is now live and modified. */
2802 arg_ts->state = 0;
2804 if (NEED_SYNC_ARG(0)) {
2805 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
2806 ? INDEX_op_st_i32
2807 : INDEX_op_st_i64);
2808 TCGOp *sop = tcg_op_insert_after(s, op, sopc);
2809 TCGTemp *out_ts = dir_ts;
2811 if (IS_DEAD_ARG(0)) {
2812 out_ts = arg_temp(op->args[1]);
2813 arg_ts->state = TS_DEAD;
2814 tcg_op_remove(s, op);
2815 } else {
2816 arg_ts->state = TS_MEM;
2819 sop->args[0] = temp_arg(out_ts);
2820 sop->args[1] = temp_arg(arg_ts->mem_base);
2821 sop->args[2] = arg_ts->mem_offset;
2822 } else {
2823 tcg_debug_assert(!IS_DEAD_ARG(0));
2826 } else {
2827 for (i = 0; i < nb_oargs; i++) {
2828 arg_ts = arg_temp(op->args[i]);
2829 dir_ts = arg_ts->state_ptr;
2830 if (!dir_ts) {
2831 continue;
2833 op->args[i] = temp_arg(dir_ts);
2834 changes = true;
2836 /* The output is now live and modified. */
2837 arg_ts->state = 0;
2839 /* Sync outputs upon their last write. */
2840 if (NEED_SYNC_ARG(i)) {
2841 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
2842 ? INDEX_op_st_i32
2843 : INDEX_op_st_i64);
2844 TCGOp *sop = tcg_op_insert_after(s, op, sopc);
2846 sop->args[0] = temp_arg(dir_ts);
2847 sop->args[1] = temp_arg(arg_ts->mem_base);
2848 sop->args[2] = arg_ts->mem_offset;
2850 arg_ts->state = TS_MEM;
2852 /* Drop outputs that are dead. */
2853 if (IS_DEAD_ARG(i)) {
2854 arg_ts->state = TS_DEAD;
2860 return changes;
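/*
 * Roughly, for an indirect global G whose shadow direct temp is G', the
 * pass above rewrites
 *
 *     add_i32  G, G, X
 *
 * into
 *
 *     ld_i32   G', env, offset-of-G     (only if G is not already loaded)
 *     add_i32  G', G', X
 *     st_i32   G', env, offset-of-G     (only where a sync is required)
 *
 * so that register allocation only ever sees the direct temp G'.
 */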
2863 #ifdef CONFIG_DEBUG_TCG
2864 static void dump_regs(TCGContext *s)
2866 TCGTemp *ts;
2867 int i;
2868 char buf[64];
2870 for(i = 0; i < s->nb_temps; i++) {
2871 ts = &s->temps[i];
2872 printf(" %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
2873 switch(ts->val_type) {
2874 case TEMP_VAL_REG:
2875 printf("%s", tcg_target_reg_names[ts->reg]);
2876 break;
2877 case TEMP_VAL_MEM:
2878 printf("%d(%s)", (int)ts->mem_offset,
2879 tcg_target_reg_names[ts->mem_base->reg]);
2880 break;
2881 case TEMP_VAL_CONST:
2882 printf("$0x%" PRIx64, ts->val);
2883 break;
2884 case TEMP_VAL_DEAD:
2885 printf("D");
2886 break;
2887 default:
2888 printf("???");
2889 break;
2891 printf("\n");
2894 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
2895 if (s->reg_to_temp[i] != NULL) {
2896 printf("%s: %s\n",
2897 tcg_target_reg_names[i],
2898 tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
2903 static void check_regs(TCGContext *s)
2905 int reg;
2906 int k;
2907 TCGTemp *ts;
2908 char buf[64];
2910 for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2911 ts = s->reg_to_temp[reg];
2912 if (ts != NULL) {
2913 if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
2914 printf("Inconsistency for register %s:\n",
2915 tcg_target_reg_names[reg]);
2916 goto fail;
2920 for (k = 0; k < s->nb_temps; k++) {
2921 ts = &s->temps[k];
2922 if (ts->val_type == TEMP_VAL_REG
2923 && ts->kind != TEMP_FIXED
2924 && s->reg_to_temp[ts->reg] != ts) {
2925 printf("Inconsistency for temp %s:\n",
2926 tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
2927 fail:
2928 printf("reg state:\n");
2929 dump_regs(s);
2930 tcg_abort();
2934 #endif
2936 static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
2938 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
2939 /* Sparc64 stack is accessed with offset of 2047 */
2940 s->current_frame_offset = (s->current_frame_offset +
2941 (tcg_target_long)sizeof(tcg_target_long) - 1) &
2942 ~(sizeof(tcg_target_long) - 1);
2943 #endif
2944 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
2945 s->frame_end) {
2946 tcg_abort();
2948 ts->mem_offset = s->current_frame_offset;
2949 ts->mem_base = s->frame_temp;
2950 ts->mem_allocated = 1;
2951 s->current_frame_offset += sizeof(tcg_target_long);
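/*
 * The alignment step above is the usual round-up idiom; standalone form,
 * assuming the slot size is a power of two:
 */
static long ex_align_up(long offset, long align)
{
    return (offset + align - 1) & ~(align - 1);
}
/* ex_align_up(13, 8) == 16, ex_align_up(16, 8) == 16 */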
2954 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
2956 /* Mark a temporary as free or dead. If 'free_or_dead' is negative,
2957 mark it free; otherwise mark it dead. */
2958 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
2960 TCGTempVal new_type;
2962 switch (ts->kind) {
2963 case TEMP_FIXED:
2964 return;
2965 case TEMP_GLOBAL:
2966 case TEMP_LOCAL:
2967 new_type = TEMP_VAL_MEM;
2968 break;
2969 case TEMP_NORMAL:
2970 new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
2971 break;
2972 case TEMP_CONST:
2973 new_type = TEMP_VAL_CONST;
2974 break;
2975 default:
2976 g_assert_not_reached();
2978 if (ts->val_type == TEMP_VAL_REG) {
2979 s->reg_to_temp[ts->reg] = NULL;
2981 ts->val_type = new_type;
2984 /* Mark a temporary as dead. */
2985 static inline void temp_dead(TCGContext *s, TCGTemp *ts)
2987 temp_free_or_dead(s, ts, 1);
2990 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
2991 register needs to be allocated to store a constant. If 'free_or_dead'
2992 is non-zero, subsequently release the temporary; if it is positive, the
2993 temp is dead; if it is negative, the temp is free. */
2994 static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
2995 TCGRegSet preferred_regs, int free_or_dead)
2997 if (!temp_readonly(ts) && !ts->mem_coherent) {
2998 if (!ts->mem_allocated) {
2999 temp_allocate_frame(s, ts);
3001 switch (ts->val_type) {
3002 case TEMP_VAL_CONST:
3003 /* If we're going to free the temp immediately, then we won't
3004 require it later in a register, so attempt to store the
3005 constant to memory directly. */
3006 if (free_or_dead
3007 && tcg_out_sti(s, ts->type, ts->val,
3008 ts->mem_base->reg, ts->mem_offset)) {
3009 break;
3011 temp_load(s, ts, tcg_target_available_regs[ts->type],
3012 allocated_regs, preferred_regs);
3013 /* fallthrough */
3015 case TEMP_VAL_REG:
3016 tcg_out_st(s, ts->type, ts->reg,
3017 ts->mem_base->reg, ts->mem_offset);
3018 break;
3020 case TEMP_VAL_MEM:
3021 break;
3023 case TEMP_VAL_DEAD:
3024 default:
3025 tcg_abort();
3027 ts->mem_coherent = 1;
3029 if (free_or_dead) {
3030 temp_free_or_dead(s, ts, free_or_dead);
3034 /* free register 'reg' by spilling the corresponding temporary if necessary */
3035 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
3037 TCGTemp *ts = s->reg_to_temp[reg];
3038 if (ts != NULL) {
3039 temp_sync(s, ts, allocated_regs, 0, -1);
3044 * tcg_reg_alloc:
3045 * @required_regs: Set of registers in which we must allocate.
3046 * @allocated_regs: Set of registers which must be avoided.
3047 * @preferred_regs: Set of registers we should prefer.
3048 * @rev: True if we search the registers in "indirect" order.
3050 * The allocated register must be in @required_regs & ~@allocated_regs,
3051 * but if we can put it in @preferred_regs we may save a move later.
3053 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
3054 TCGRegSet allocated_regs,
3055 TCGRegSet preferred_regs, bool rev)
3057 int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3058 TCGRegSet reg_ct[2];
3059 const int *order;
3061 reg_ct[1] = required_regs & ~allocated_regs;
3062 tcg_debug_assert(reg_ct[1] != 0);
3063 reg_ct[0] = reg_ct[1] & preferred_regs;
3065 /* Skip the preferred_regs option if it cannot be satisfied,
3066 or if the preference made no difference. */
3067 f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
3069 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
3071 /* Try free registers, preferences first. */
3072 for (j = f; j < 2; j++) {
3073 TCGRegSet set = reg_ct[j];
3075 if (tcg_regset_single(set)) {
3076 /* One register in the set. */
3077 TCGReg reg = tcg_regset_first(set);
3078 if (s->reg_to_temp[reg] == NULL) {
3079 return reg;
3081 } else {
3082 for (i = 0; i < n; i++) {
3083 TCGReg reg = order[i];
3084 if (s->reg_to_temp[reg] == NULL &&
3085 tcg_regset_test_reg(set, reg)) {
3086 return reg;
3092 /* We must spill something. */
3093 for (j = f; j < 2; j++) {
3094 TCGRegSet set = reg_ct[j];
3096 if (tcg_regset_single(set)) {
3097 /* One register in the set. */
3098 TCGReg reg = tcg_regset_first(set);
3099 tcg_reg_free(s, reg, allocated_regs);
3100 return reg;
3101 } else {
3102 for (i = 0; i < n; i++) {
3103 TCGReg reg = order[i];
3104 if (tcg_regset_test_reg(set, reg)) {
3105 tcg_reg_free(s, reg, allocated_regs);
3106 return reg;
3112 tcg_abort();
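/*
 * The search above, stripped to its skeleton: free registers are tried
 * before spilling, and within each pass the preferred subset is tried
 * before the full required set.  The ex_* names are hypothetical; the
 * real code also honours the target's allocation order and register
 * sets wider than 32 bits.
 */
static int ex_pick_reg(unsigned required, unsigned preferred,
                       const bool *reg_free, int nb_regs)
{
    unsigned sets[2] = { required & preferred, required };

    for (int spill = 0; spill < 2; spill++) {
        for (int j = 0; j < 2; j++) {
            for (int r = 0; r < nb_regs; r++) {
                if ((sets[j] & (1u << r)) && (spill || reg_free[r])) {
                    return r;   /* when spill != 0, evict r's current temp */
                }
            }
        }
    }
    return -1;                  /* unreachable if required is non-empty */
}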
3115 /* Make sure the temporary is in a register. If needed, allocate the register
3116 from DESIRED while avoiding ALLOCATED. */
3117 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
3118 TCGRegSet allocated_regs, TCGRegSet preferred_regs)
3120 TCGReg reg;
3122 switch (ts->val_type) {
3123 case TEMP_VAL_REG:
3124 return;
3125 case TEMP_VAL_CONST:
3126 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
3127 preferred_regs, ts->indirect_base);
3128 if (ts->type <= TCG_TYPE_I64) {
3129 tcg_out_movi(s, ts->type, reg, ts->val);
3130 } else {
3131 uint64_t val = ts->val;
3132 MemOp vece = MO_64;
3135 * Find the minimal vector element that matches the constant.
3136 * The targets will, in general, have to do this search anyway,
3137 * so do it here generically.
3139 if (val == dup_const(MO_8, val)) {
3140 vece = MO_8;
3141 } else if (val == dup_const(MO_16, val)) {
3142 vece = MO_16;
3143 } else if (val == dup_const(MO_32, val)) {
3144 vece = MO_32;
3147 tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
3149 ts->mem_coherent = 0;
3150 break;
3151 case TEMP_VAL_MEM:
3152 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
3153 preferred_regs, ts->indirect_base);
3154 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
3155 ts->mem_coherent = 1;
3156 break;
3157 case TEMP_VAL_DEAD:
3158 default:
3159 tcg_abort();
3161 ts->reg = reg;
3162 ts->val_type = TEMP_VAL_REG;
3163 s->reg_to_temp[reg] = ts;
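/*
 * dup_const(MO_8, v) replicates the low 8 bits of v across all 64 bits,
 * and likewise for MO_16/MO_32, so the comparisons above find the
 * smallest element size whose replication reproduces the constant.  A
 * standalone equivalent of that test (ex_min_vece_bits is hypothetical):
 */
static int ex_min_vece_bits(uint64_t val)
{
    if (val == (val & 0xff) * 0x0101010101010101ull) {
        return 8;
    }
    if (val == (val & 0xffff) * 0x0001000100010001ull) {
        return 16;
    }
    if (val == (val & 0xffffffffull) * 0x0000000100000001ull) {
        return 32;
    }
    return 64;
}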
3166 /* Save a temporary to memory. 'allocated_regs' is used in case a
3167 temporary register needs to be allocated to store a constant. */
3168 static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
3170 /* The liveness analysis already ensures that globals are back
3171 in memory. Keep a tcg_debug_assert for safety. */
3172 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
3175 /* save globals to their canonical location and assume they can be
3176 modified by the following code. 'allocated_regs' is used in case a
3177 temporary register needs to be allocated to store a constant. */
3178 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
3180 int i, n;
3182 for (i = 0, n = s->nb_globals; i < n; i++) {
3183 temp_save(s, &s->temps[i], allocated_regs);
3187 /* sync globals to their canonical location and assume they can be
3188 read by the following code. 'allocated_regs' is used in case a
3189 temporary register needs to be allocated to store a constant. */
3190 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
3192 int i, n;
3194 for (i = 0, n = s->nb_globals; i < n; i++) {
3195 TCGTemp *ts = &s->temps[i];
3196 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
3197 || ts->kind == TEMP_FIXED
3198 || ts->mem_coherent);
3202 /* at the end of a basic block, we assume all temporaries are dead and
3203 all globals are stored at their canonical location. */
3204 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
3206 int i;
3208 for (i = s->nb_globals; i < s->nb_temps; i++) {
3209 TCGTemp *ts = &s->temps[i];
3211 switch (ts->kind) {
3212 case TEMP_LOCAL:
3213 temp_save(s, ts, allocated_regs);
3214 break;
3215 case TEMP_NORMAL:
3216 /* The liveness analysis already ensures that temps are dead.
3217 Keep a tcg_debug_assert for safety. */
3218 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
3219 break;
3220 case TEMP_CONST:
3221 /* Similarly, we should have freed any allocated register. */
3222 tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
3223 break;
3224 default:
3225 g_assert_not_reached();
3229 save_globals(s, allocated_regs);
3233 * At a conditional branch, we assume all temporaries are dead and
3234 * all globals and local temps are synced to their location.
3236 static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
3238 sync_globals(s, allocated_regs);
3240 for (int i = s->nb_globals; i < s->nb_temps; i++) {
3241 TCGTemp *ts = &s->temps[i];
3243 * The liveness analysis already ensures that temps are dead.
3244 * Keep tcg_debug_asserts for safety.
3246 switch (ts->kind) {
3247 case TEMP_LOCAL:
3248 tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
3249 break;
3250 case TEMP_NORMAL:
3251 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
3252 break;
3253 case TEMP_CONST:
3254 break;
3255 default:
3256 g_assert_not_reached();
3262 * Specialized code generation for INDEX_op_mov_* with a constant.
3264 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
3265 tcg_target_ulong val, TCGLifeData arg_life,
3266 TCGRegSet preferred_regs)
3268 /* ENV should not be modified. */
3269 tcg_debug_assert(!temp_readonly(ots));
3271 /* The movi is not explicitly generated here. */
3272 if (ots->val_type == TEMP_VAL_REG) {
3273 s->reg_to_temp[ots->reg] = NULL;
3275 ots->val_type = TEMP_VAL_CONST;
3276 ots->val = val;
3277 ots->mem_coherent = 0;
3278 if (NEED_SYNC_ARG(0)) {
3279 temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
3280 } else if (IS_DEAD_ARG(0)) {
3281 temp_dead(s, ots);
3286 * Specialized code generation for INDEX_op_mov_*.
3288 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
3290 const TCGLifeData arg_life = op->life;
3291 TCGRegSet allocated_regs, preferred_regs;
3292 TCGTemp *ts, *ots;
3293 TCGType otype, itype;
3295 allocated_regs = s->reserved_regs;
3296 preferred_regs = op->output_pref[0];
3297 ots = arg_temp(op->args[0]);
3298 ts = arg_temp(op->args[1]);
3300 /* ENV should not be modified. */
3301 tcg_debug_assert(!temp_readonly(ots));
3303 /* Note that otype != itype for no-op truncation. */
3304 otype = ots->type;
3305 itype = ts->type;
3307 if (ts->val_type == TEMP_VAL_CONST) {
3308 /* propagate constant or generate sti */
3309 tcg_target_ulong val = ts->val;
3310 if (IS_DEAD_ARG(1)) {
3311 temp_dead(s, ts);
3313 tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
3314 return;
3317 /* If the source value is in memory we're going to be forced
3318 to have it in a register in order to perform the copy. Copy
3319 the SOURCE value into its own register first, that way we
3320 don't have to reload SOURCE the next time it is used. */
3321 if (ts->val_type == TEMP_VAL_MEM) {
3322 temp_load(s, ts, tcg_target_available_regs[itype],
3323 allocated_regs, preferred_regs);
3326 tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
3327 if (IS_DEAD_ARG(0)) {
3328 /* mov to a non-saved dead register makes no sense (even with
3329 liveness analysis disabled). */
3330 tcg_debug_assert(NEED_SYNC_ARG(0));
3331 if (!ots->mem_allocated) {
3332 temp_allocate_frame(s, ots);
3334 tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
3335 if (IS_DEAD_ARG(1)) {
3336 temp_dead(s, ts);
3338 temp_dead(s, ots);
3339 } else {
3340 if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
3341 /* the mov can be suppressed */
3342 if (ots->val_type == TEMP_VAL_REG) {
3343 s->reg_to_temp[ots->reg] = NULL;
3345 ots->reg = ts->reg;
3346 temp_dead(s, ts);
3347 } else {
3348 if (ots->val_type != TEMP_VAL_REG) {
3349 /* When allocating a new register, make sure to not spill the
3350 input one. */
3351 tcg_regset_set_reg(allocated_regs, ts->reg);
3352 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
3353 allocated_regs, preferred_regs,
3354 ots->indirect_base);
3356 if (!tcg_out_mov(s, otype, ots->reg, ts->reg)) {
3358 * Cross register class move not supported.
3359 * Store the source register into the destination slot
3360 * and leave the destination temp as TEMP_VAL_MEM.
3362 assert(!temp_readonly(ots));
3363 if (!ts->mem_allocated) {
3364 temp_allocate_frame(s, ots);
3366 tcg_out_st(s, ts->type, ts->reg,
3367 ots->mem_base->reg, ots->mem_offset);
3368 ots->mem_coherent = 1;
3369 temp_free_or_dead(s, ots, -1);
3370 return;
3373 ots->val_type = TEMP_VAL_REG;
3374 ots->mem_coherent = 0;
3375 s->reg_to_temp[ots->reg] = ots;
3376 if (NEED_SYNC_ARG(0)) {
3377 temp_sync(s, ots, allocated_regs, 0, 0);
3383 * Specialized code generation for INDEX_op_dup_vec.
3385 static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
3387 const TCGLifeData arg_life = op->life;
3388 TCGRegSet dup_out_regs, dup_in_regs;
3389 TCGTemp *its, *ots;
3390 TCGType itype, vtype;
3391 intptr_t endian_fixup;
3392 unsigned vece;
3393 bool ok;
3395 ots = arg_temp(op->args[0]);
3396 its = arg_temp(op->args[1]);
3398 /* ENV should not be modified. */
3399 tcg_debug_assert(!temp_readonly(ots));
3401 itype = its->type;
3402 vece = TCGOP_VECE(op);
3403 vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
3405 if (its->val_type == TEMP_VAL_CONST) {
3406 /* Propagate constant via movi -> dupi. */
3407 tcg_target_ulong val = its->val;
3408 if (IS_DEAD_ARG(1)) {
3409 temp_dead(s, its);
3411 tcg_reg_alloc_do_movi(s, ots, val, arg_life, op->output_pref[0]);
3412 return;
3415 dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
3416 dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
3418 /* Allocate the output register now. */
3419 if (ots->val_type != TEMP_VAL_REG) {
3420 TCGRegSet allocated_regs = s->reserved_regs;
3422 if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
3423 /* Make sure to not spill the input register. */
3424 tcg_regset_set_reg(allocated_regs, its->reg);
3426 ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
3427 op->output_pref[0], ots->indirect_base);
3428 ots->val_type = TEMP_VAL_REG;
3429 ots->mem_coherent = 0;
3430 s->reg_to_temp[ots->reg] = ots;
3433 switch (its->val_type) {
3434 case TEMP_VAL_REG:
3436 * The dup constraints must be broad, covering all possible VECE.
3437 * However, tcg_out_dup_vec() gets to see the VECE and we allow it
3438 * to fail, indicating that extra moves are required for that case.
3440 if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
3441 if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
3442 goto done;
3444 /* Try again from memory or a vector input register. */
3446 if (!its->mem_coherent) {
3448 * The input register is not synced, and so an extra store
3449 * would be required to use memory. Attempt an integer-vector
3450 * register move first. We do not have a TCGRegSet for this.
3452 if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
3453 break;
3455 /* Sync the temp back to its slot and load from there. */
3456 temp_sync(s, its, s->reserved_regs, 0, 0);
3458 /* fall through */
3460 case TEMP_VAL_MEM:
3461 #ifdef HOST_WORDS_BIGENDIAN
3462 endian_fixup = itype == TCG_TYPE_I32 ? 4 : 8;
3463 endian_fixup -= 1 << vece;
3464 #else
3465 endian_fixup = 0;
3466 #endif
3467 if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
3468 its->mem_offset + endian_fixup)) {
3469 goto done;
3471 tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
3472 break;
3474 default:
3475 g_assert_not_reached();
3478 /* We now have a vector input register, so dup must succeed. */
3479 ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
3480 tcg_debug_assert(ok);
3482 done:
3483 if (IS_DEAD_ARG(1)) {
3484 temp_dead(s, its);
3486 if (NEED_SYNC_ARG(0)) {
3487 temp_sync(s, ots, s->reserved_regs, 0, 0);
3489 if (IS_DEAD_ARG(0)) {
3490 temp_dead(s, ots);
3494 static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
3496 const TCGLifeData arg_life = op->life;
3497 const TCGOpDef * const def = &tcg_op_defs[op->opc];
3498 TCGRegSet i_allocated_regs;
3499 TCGRegSet o_allocated_regs;
3500 int i, k, nb_iargs, nb_oargs;
3501 TCGReg reg;
3502 TCGArg arg;
3503 const TCGArgConstraint *arg_ct;
3504 TCGTemp *ts;
3505 TCGArg new_args[TCG_MAX_OP_ARGS];
3506 int const_args[TCG_MAX_OP_ARGS];
3508 nb_oargs = def->nb_oargs;
3509 nb_iargs = def->nb_iargs;
3511 /* copy constants */
3512 memcpy(new_args + nb_oargs + nb_iargs,
3513 op->args + nb_oargs + nb_iargs,
3514 sizeof(TCGArg) * def->nb_cargs);
3516 i_allocated_regs = s->reserved_regs;
3517 o_allocated_regs = s->reserved_regs;
3519 /* satisfy input constraints */
3520 for (k = 0; k < nb_iargs; k++) {
3521 TCGRegSet i_preferred_regs, o_preferred_regs;
3523 i = def->args_ct[nb_oargs + k].sort_index;
3524 arg = op->args[i];
3525 arg_ct = &def->args_ct[i];
3526 ts = arg_temp(arg);
3528 if (ts->val_type == TEMP_VAL_CONST
3529 && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
3530 /* constant is OK for instruction */
3531 const_args[i] = 1;
3532 new_args[i] = ts->val;
3533 continue;
3536 i_preferred_regs = o_preferred_regs = 0;
3537 if (arg_ct->ialias) {
3538 o_preferred_regs = op->output_pref[arg_ct->alias_index];
3541 * If the input is readonly, then it cannot also be an
3542 * output and aliased to itself. If the input is not
3543 * dead after the instruction, we must allocate a new
3544 * register and move it.
3546 if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
3547 goto allocate_in_reg;
3551 * Check if the current register has already been allocated
3552 * for another input aliased to an output.
3554 if (ts->val_type == TEMP_VAL_REG) {
3555 reg = ts->reg;
3556 for (int k2 = 0; k2 < k; k2++) {
3557 int i2 = def->args_ct[nb_oargs + k2].sort_index;
3558 if (def->args_ct[i2].ialias && reg == new_args[i2]) {
3559 goto allocate_in_reg;
3563 i_preferred_regs = o_preferred_regs;
3566 temp_load(s, ts, arg_ct->regs, i_allocated_regs, i_preferred_regs);
3567 reg = ts->reg;
3569 if (!tcg_regset_test_reg(arg_ct->regs, reg)) {
3570 allocate_in_reg:
3572 * Allocate a new register matching the constraint
3573 * and move the temporary register into it.
3575 temp_load(s, ts, tcg_target_available_regs[ts->type],
3576 i_allocated_regs, 0);
3577 reg = tcg_reg_alloc(s, arg_ct->regs, i_allocated_regs,
3578 o_preferred_regs, ts->indirect_base);
3579 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
3581 * Cross register class move not supported. Sync the
3582 * temp back to its slot and load from there.
3584 temp_sync(s, ts, i_allocated_regs, 0, 0);
3585 tcg_out_ld(s, ts->type, reg,
3586 ts->mem_base->reg, ts->mem_offset);
3589 new_args[i] = reg;
3590 const_args[i] = 0;
3591 tcg_regset_set_reg(i_allocated_regs, reg);
3594 /* mark dead temporaries and free the associated registers */
3595 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3596 if (IS_DEAD_ARG(i)) {
3597 temp_dead(s, arg_temp(op->args[i]));
3601 if (def->flags & TCG_OPF_COND_BRANCH) {
3602 tcg_reg_alloc_cbranch(s, i_allocated_regs);
3603 } else if (def->flags & TCG_OPF_BB_END) {
3604 tcg_reg_alloc_bb_end(s, i_allocated_regs);
3605 } else {
3606 if (def->flags & TCG_OPF_CALL_CLOBBER) {
3607 /* XXX: permit a generic clobber register list? */
3608 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
3609 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
3610 tcg_reg_free(s, i, i_allocated_regs);
3614 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3615 /* sync globals if the op has side effects and might trigger
3616 an exception. */
3617 sync_globals(s, i_allocated_regs);
3620 /* satisfy the output constraints */
3621 for(k = 0; k < nb_oargs; k++) {
3622 i = def->args_ct[k].sort_index;
3623 arg = op->args[i];
3624 arg_ct = &def->args_ct[i];
3625 ts = arg_temp(arg);
3627 /* ENV should not be modified. */
3628 tcg_debug_assert(!temp_readonly(ts));
3630 if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
3631 reg = new_args[arg_ct->alias_index];
3632 } else if (arg_ct->newreg) {
3633 reg = tcg_reg_alloc(s, arg_ct->regs,
3634 i_allocated_regs | o_allocated_regs,
3635 op->output_pref[k], ts->indirect_base);
3636 } else {
3637 reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
3638 op->output_pref[k], ts->indirect_base);
3640 tcg_regset_set_reg(o_allocated_regs, reg);
3641 if (ts->val_type == TEMP_VAL_REG) {
3642 s->reg_to_temp[ts->reg] = NULL;
3644 ts->val_type = TEMP_VAL_REG;
3645 ts->reg = reg;
3647 * Temp value is modified, so the value kept in memory is
3648 * potentially not the same.
3650 ts->mem_coherent = 0;
3651 s->reg_to_temp[reg] = ts;
3652 new_args[i] = reg;
3656 /* emit instruction */
3657 if (def->flags & TCG_OPF_VECTOR) {
3658 tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
3659 new_args, const_args);
3660 } else {
3661 tcg_out_op(s, op->opc, new_args, const_args);
3664 /* move the outputs in the correct register if needed */
3665 for(i = 0; i < nb_oargs; i++) {
3666 ts = arg_temp(op->args[i]);
3668 /* ENV should not be modified. */
3669 tcg_debug_assert(!temp_readonly(ts));
3671 if (NEED_SYNC_ARG(i)) {
3672 temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
3673 } else if (IS_DEAD_ARG(i)) {
3674 temp_dead(s, ts);
3679 static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
3681 const TCGLifeData arg_life = op->life;
3682 TCGTemp *ots, *itsl, *itsh;
3683 TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
3685 /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
3686 tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
3687 tcg_debug_assert(TCGOP_VECE(op) == MO_64);
3689 ots = arg_temp(op->args[0]);
3690 itsl = arg_temp(op->args[1]);
3691 itsh = arg_temp(op->args[2]);
3693 /* ENV should not be modified. */
3694 tcg_debug_assert(!temp_readonly(ots));
3696 /* Allocate the output register now. */
3697 if (ots->val_type != TEMP_VAL_REG) {
3698 TCGRegSet allocated_regs = s->reserved_regs;
3699 TCGRegSet dup_out_regs =
3700 tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
3702 /* Make sure to not spill the input registers. */
3703 if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
3704 tcg_regset_set_reg(allocated_regs, itsl->reg);
3706 if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
3707 tcg_regset_set_reg(allocated_regs, itsh->reg);
3710 ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
3711 op->output_pref[0], ots->indirect_base);
3712 ots->val_type = TEMP_VAL_REG;
3713 ots->mem_coherent = 0;
3714 s->reg_to_temp[ots->reg] = ots;
3717 /* Promote dup2 of immediates to dupi_vec. */
3718 if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
3719 uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
3720 MemOp vece = MO_64;
3722 if (val == dup_const(MO_8, val)) {
3723 vece = MO_8;
3724 } else if (val == dup_const(MO_16, val)) {
3725 vece = MO_16;
3726 } else if (val == dup_const(MO_32, val)) {
3727 vece = MO_32;
3730 tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
3731 goto done;
3734 /* If the two inputs form one 64-bit value, try dupm_vec. */
3735 if (itsl + 1 == itsh && itsl->base_type == TCG_TYPE_I64) {
3736 if (!itsl->mem_coherent) {
3737 temp_sync(s, itsl, s->reserved_regs, 0, 0);
3739 if (!itsh->mem_coherent) {
3740 temp_sync(s, itsh, s->reserved_regs, 0, 0);
3742 #ifdef HOST_WORDS_BIGENDIAN
3743 TCGTemp *its = itsh;
3744 #else
3745 TCGTemp *its = itsl;
3746 #endif
3747 if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
3748 its->mem_base->reg, its->mem_offset)) {
3749 goto done;
3753 /* Fall back to generic expansion. */
3754 return false;
3756 done:
3757 if (IS_DEAD_ARG(1)) {
3758 temp_dead(s, itsl);
3760 if (IS_DEAD_ARG(2)) {
3761 temp_dead(s, itsh);
3763 if (NEED_SYNC_ARG(0)) {
3764 temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
3765 } else if (IS_DEAD_ARG(0)) {
3766 temp_dead(s, ots);
3768 return true;
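/*
 * deposit64(itsl->val, 32, 32, itsh->val) above simply forms the 64-bit
 * value high:low from the two 32-bit halves.  Equivalent arithmetic,
 * standalone (ex_concat_32 is hypothetical):
 */
static uint64_t ex_concat_32(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)hi << 32) | lo;
}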
3771 #ifdef TCG_TARGET_STACK_GROWSUP
3772 #define STACK_DIR(x) (-(x))
3773 #else
3774 #define STACK_DIR(x) (x)
3775 #endif
3777 static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
3779 const int nb_oargs = TCGOP_CALLO(op);
3780 const int nb_iargs = TCGOP_CALLI(op);
3781 const TCGLifeData arg_life = op->life;
3782 int flags, nb_regs, i;
3783 TCGReg reg;
3784 TCGArg arg;
3785 TCGTemp *ts;
3786 intptr_t stack_offset;
3787 size_t call_stack_size;
3788 tcg_insn_unit *func_addr;
3789 int allocate_args;
3790 TCGRegSet allocated_regs;
3792 func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
3793 flags = tcg_call_flags(op);
3795 nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
3796 if (nb_regs > nb_iargs) {
3797 nb_regs = nb_iargs;
3800 /* assign stack slots first */
3801 call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
3802 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
3803 ~(TCG_TARGET_STACK_ALIGN - 1);
3804 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
3805 if (allocate_args) {
3806 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
3807 preallocate call stack */
3808 tcg_abort();
3811 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
3812 for (i = nb_regs; i < nb_iargs; i++) {
3813 arg = op->args[nb_oargs + i];
3814 #ifdef TCG_TARGET_STACK_GROWSUP
3815 stack_offset -= sizeof(tcg_target_long);
3816 #endif
3817 if (arg != TCG_CALL_DUMMY_ARG) {
3818 ts = arg_temp(arg);
3819 temp_load(s, ts, tcg_target_available_regs[ts->type],
3820 s->reserved_regs, 0);
3821 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
3823 #ifndef TCG_TARGET_STACK_GROWSUP
3824 stack_offset += sizeof(tcg_target_long);
3825 #endif
3828 /* assign input registers */
3829 allocated_regs = s->reserved_regs;
3830 for (i = 0; i < nb_regs; i++) {
3831 arg = op->args[nb_oargs + i];
3832 if (arg != TCG_CALL_DUMMY_ARG) {
3833 ts = arg_temp(arg);
3834 reg = tcg_target_call_iarg_regs[i];
3836 if (ts->val_type == TEMP_VAL_REG) {
3837 if (ts->reg != reg) {
3838 tcg_reg_free(s, reg, allocated_regs);
3839 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
3841 * Cross register class move not supported. Sync the
3842 * temp back to its slot and load from there.
3844 temp_sync(s, ts, allocated_regs, 0, 0);
3845 tcg_out_ld(s, ts->type, reg,
3846 ts->mem_base->reg, ts->mem_offset);
3849 } else {
3850 TCGRegSet arg_set = 0;
3852 tcg_reg_free(s, reg, allocated_regs);
3853 tcg_regset_set_reg(arg_set, reg);
3854 temp_load(s, ts, arg_set, allocated_regs, 0);
3857 tcg_regset_set_reg(allocated_regs, reg);
3861 /* mark dead temporaries and free the associated registers */
3862 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3863 if (IS_DEAD_ARG(i)) {
3864 temp_dead(s, arg_temp(op->args[i]));
3868 /* clobber call registers */
3869 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
3870 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
3871 tcg_reg_free(s, i, allocated_regs);
3875 /* Save globals if they might be written by the helper, sync them if
3876 they might be read. */
3877 if (flags & TCG_CALL_NO_READ_GLOBALS) {
3878 /* Nothing to do */
3879 } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
3880 sync_globals(s, allocated_regs);
3881 } else {
3882 save_globals(s, allocated_regs);
3885 tcg_out_call(s, func_addr);
3887 /* assign output registers and emit moves if needed */
3888 for(i = 0; i < nb_oargs; i++) {
3889 arg = op->args[i];
3890 ts = arg_temp(arg);
3892 /* ENV should not be modified. */
3893 tcg_debug_assert(!temp_readonly(ts));
3895 reg = tcg_target_call_oarg_regs[i];
3896 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
3897 if (ts->val_type == TEMP_VAL_REG) {
3898 s->reg_to_temp[ts->reg] = NULL;
3900 ts->val_type = TEMP_VAL_REG;
3901 ts->reg = reg;
3902 ts->mem_coherent = 0;
3903 s->reg_to_temp[reg] = ts;
3904 if (NEED_SYNC_ARG(i)) {
3905 temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
3906 } else if (IS_DEAD_ARG(i)) {
3907 temp_dead(s, ts);
3912 #ifdef CONFIG_PROFILER
3914 /* avoid copy/paste errors */
3915 #define PROF_ADD(to, from, field) \
3916 do { \
3917 (to)->field += qatomic_read(&((from)->field)); \
3918 } while (0)
3920 #define PROF_MAX(to, from, field) \
3921 do { \
3922 typeof((from)->field) val__ = qatomic_read(&((from)->field)); \
3923 if (val__ > (to)->field) { \
3924 (to)->field = val__; \
3926 } while (0)
3928 /* Pass in a zeroed @prof */
3929 static inline
3930 void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
3932 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
3933 unsigned int i;
3935 for (i = 0; i < n_ctxs; i++) {
3936 TCGContext *s = qatomic_read(&tcg_ctxs[i]);
3937 const TCGProfile *orig = &s->prof;
3939 if (counters) {
3940 PROF_ADD(prof, orig, cpu_exec_time);
3941 PROF_ADD(prof, orig, tb_count1);
3942 PROF_ADD(prof, orig, tb_count);
3943 PROF_ADD(prof, orig, op_count);
3944 PROF_MAX(prof, orig, op_count_max);
3945 PROF_ADD(prof, orig, temp_count);
3946 PROF_MAX(prof, orig, temp_count_max);
3947 PROF_ADD(prof, orig, del_op_count);
3948 PROF_ADD(prof, orig, code_in_len);
3949 PROF_ADD(prof, orig, code_out_len);
3950 PROF_ADD(prof, orig, search_out_len);
3951 PROF_ADD(prof, orig, interm_time);
3952 PROF_ADD(prof, orig, code_time);
3953 PROF_ADD(prof, orig, la_time);
3954 PROF_ADD(prof, orig, opt_time);
3955 PROF_ADD(prof, orig, restore_count);
3956 PROF_ADD(prof, orig, restore_time);
3958 if (table) {
3959 int i;
3961 for (i = 0; i < NB_OPS; i++) {
3962 PROF_ADD(prof, orig, table_op_count[i]);
3968 #undef PROF_ADD
3969 #undef PROF_MAX
3971 static void tcg_profile_snapshot_counters(TCGProfile *prof)
3973 tcg_profile_snapshot(prof, true, false);
3976 static void tcg_profile_snapshot_table(TCGProfile *prof)
3978 tcg_profile_snapshot(prof, false, true);
3981 void tcg_dump_op_count(void)
3983 TCGProfile prof = {};
3984 int i;
3986 tcg_profile_snapshot_table(&prof);
3987 for (i = 0; i < NB_OPS; i++) {
3988 qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name,
3989 prof.table_op_count[i]);
3993 int64_t tcg_cpu_exec_time(void)
3995 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
3996 unsigned int i;
3997 int64_t ret = 0;
3999 for (i = 0; i < n_ctxs; i++) {
4000 const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
4001 const TCGProfile *prof = &s->prof;
4003 ret += qatomic_read(&prof->cpu_exec_time);
4005 return ret;
4007 #else
4008 void tcg_dump_op_count(void)
4010 qemu_printf("[TCG profiler not compiled]\n");
4013 int64_t tcg_cpu_exec_time(void)
4015 error_report("%s: TCG profiler not compiled", __func__);
4016 exit(EXIT_FAILURE);
4018 #endif
4021 int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
4023 #ifdef CONFIG_PROFILER
4024 TCGProfile *prof = &s->prof;
4025 #endif
4026 int i, num_insns;
4027 TCGOp *op;
4029 #ifdef CONFIG_PROFILER
4031 int n = 0;
4033 QTAILQ_FOREACH(op, &s->ops, link) {
4034 n++;
4036 qatomic_set(&prof->op_count, prof->op_count + n);
4037 if (n > prof->op_count_max) {
4038 qatomic_set(&prof->op_count_max, n);
4041 n = s->nb_temps;
4042 qatomic_set(&prof->temp_count, prof->temp_count + n);
4043 if (n > prof->temp_count_max) {
4044 qatomic_set(&prof->temp_count_max, n);
4047 #endif
4049 #ifdef DEBUG_DISAS
4050 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
4051 && qemu_log_in_addr_range(tb->pc))) {
4052 FILE *logfile = qemu_log_lock();
4053 qemu_log("OP:\n");
4054 tcg_dump_ops(s, false);
4055 qemu_log("\n");
4056 qemu_log_unlock(logfile);
4058 #endif
4060 #ifdef CONFIG_DEBUG_TCG
4061 /* Ensure all labels referenced have been emitted. */
4063 TCGLabel *l;
4064 bool error = false;
4066 QSIMPLEQ_FOREACH(l, &s->labels, next) {
4067 if (unlikely(!l->present) && l->refs) {
4068 qemu_log_mask(CPU_LOG_TB_OP,
4069 "$L%d referenced but not present.\n", l->id);
4070 error = true;
4073 assert(!error);
4075 #endif
4077 #ifdef CONFIG_PROFILER
4078 qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
4079 #endif
4081 #ifdef USE_TCG_OPTIMIZATIONS
4082 tcg_optimize(s);
4083 #endif
4085 #ifdef CONFIG_PROFILER
4086 qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
4087 qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
4088 #endif
4090 reachable_code_pass(s);
4091 liveness_pass_1(s);
4093 if (s->nb_indirects > 0) {
4094 #ifdef DEBUG_DISAS
4095 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
4096 && qemu_log_in_addr_range(tb->pc))) {
4097 FILE *logfile = qemu_log_lock();
4098 qemu_log("OP before indirect lowering:\n");
4099 tcg_dump_ops(s, false);
4100 qemu_log("\n");
4101 qemu_log_unlock(logfile);
4103 #endif
4104 /* Replace indirect temps with direct temps. */
4105 if (liveness_pass_2(s)) {
4106 /* If changes were made, re-run liveness. */
4107 liveness_pass_1(s);
4111 #ifdef CONFIG_PROFILER
4112 qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
4113 #endif
4115 #ifdef DEBUG_DISAS
4116 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
4117 && qemu_log_in_addr_range(tb->pc))) {
4118 FILE *logfile = qemu_log_lock();
4119 qemu_log("OP after optimization and liveness analysis:\n");
4120 tcg_dump_ops(s, true);
4121 qemu_log("\n");
4122 qemu_log_unlock(logfile);
4124 #endif
4126 tcg_reg_alloc_start(s);
4129 * Reset the buffer pointers when restarting after overflow.
4130 * TODO: Move this into translate-all.c with the rest of the
4131 * buffer management. Having only this done here is confusing.
4133 s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
4134 s->code_ptr = s->code_buf;
4136 #ifdef TCG_TARGET_NEED_LDST_LABELS
4137 QSIMPLEQ_INIT(&s->ldst_labels);
4138 #endif
4139 #ifdef TCG_TARGET_NEED_POOL_LABELS
4140 s->pool_labels = NULL;
4141 #endif
4143 num_insns = -1;
4144 QTAILQ_FOREACH(op, &s->ops, link) {
4145 TCGOpcode opc = op->opc;
4147 #ifdef CONFIG_PROFILER
4148 qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
4149 #endif
4151 switch (opc) {
4152 case INDEX_op_mov_i32:
4153 case INDEX_op_mov_i64:
4154 case INDEX_op_mov_vec:
4155 tcg_reg_alloc_mov(s, op);
4156 break;
4157 case INDEX_op_dup_vec:
4158 tcg_reg_alloc_dup(s, op);
4159 break;
4160 case INDEX_op_insn_start:
4161 if (num_insns >= 0) {
4162 size_t off = tcg_current_code_size(s);
4163 s->gen_insn_end_off[num_insns] = off;
4164 /* Assert that we do not overflow our stored offset. */
4165 assert(s->gen_insn_end_off[num_insns] == off);
4167 num_insns++;
4168 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
4169 target_ulong a;
4170 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
4171 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
4172 #else
4173 a = op->args[i];
4174 #endif
4175 s->gen_insn_data[num_insns][i] = a;
4176 }
4177 break;
4178 case INDEX_op_discard:
4179 temp_dead(s, arg_temp(op->args[0]));
4180 break;
4181 case INDEX_op_set_label:
4182 tcg_reg_alloc_bb_end(s, s->reserved_regs);
4183 tcg_out_label(s, arg_label(op->args[0]));
4184 break;
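/*
 * A label is a control-flow merge point: tcg_reg_alloc_bb_end() above
 * saves or syncs temps as required and forgets all register
 * assignments, since nothing can be assumed to be live in a register
 * across the label.
 */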
4185 case INDEX_op_call:
4186 tcg_reg_alloc_call(s, op);
4187 break;
4188 case INDEX_op_dup2_vec:
4189 if (tcg_reg_alloc_dup2(s, op)) {
4190 break;
4191 }
4192 /* fall through */
4193 default:
4194 /* Sanity check that we've not introduced any unhandled opcodes. */
4195 tcg_debug_assert(tcg_op_supported(opc));
4196 /* Note: code generation could be sped up considerably with
4197 specialized register allocator functions for the most common
4198 argument patterns. */
4199 tcg_reg_alloc_op(s, op);
4200 break;
4201 }
4202 #ifdef CONFIG_DEBUG_TCG
4203 check_regs(s);
4204 #endif
4205 /* Test for (pending) buffer overflow. The assumption is that any
4206 one operation beginning below the high water mark cannot overrun
4207 the buffer completely. Thus we can test for overflow after
4208 generating code without having to check during generation. */
4209 if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
4210 return -1;
4211 }
4212 /* Test for TB overflow, as seen by gen_insn_end_off. */
4213 if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
4214 return -2;
4215 }
4216 }
4217 tcg_debug_assert(num_insns >= 0);
4218 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
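/*
 * gen_insn_data[] holds the TARGET_INSN_START_WORDS values supplied by
 * each insn_start op, and gen_insn_end_off[] the host-code offset at
 * which that guest instruction's code ends; the caller uses them to
 * build the search data that maps host PCs back to guest state.
 */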
4220 /* Generate TB finalization at the end of block */
4221 #ifdef TCG_TARGET_NEED_LDST_LABELS
4222 i = tcg_out_ldst_finalize(s);
4223 if (i < 0) {
4224 return i;
4225 }
4226 #endif
4227 #ifdef TCG_TARGET_NEED_POOL_LABELS
4228 i = tcg_out_pool_finalize(s);
4229 if (i < 0) {
4230 return i;
4231 }
4232 #endif
4233 if (!tcg_resolve_relocs(s)) {
4234 return -2;
4235 }
4237 #ifndef CONFIG_TCG_INTERPRETER
4238 /* flush instruction cache */
4239 flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
4240 (uintptr_t)s->code_buf,
4241 tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
4242 #endif
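/*
 * Only the success path reaches this point: the negative returns above
 * signal either host buffer overflow (-1, code ran past
 * code_gen_highwater) or a TB too large for gen_insn_end_off or its
 * relocations (-2); the caller is expected to retry in either case.
 */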
4244 return tcg_current_code_size(s);
4246 }
4247 #ifdef CONFIG_PROFILER
4248 void tcg_dump_info(void)
4249 {
4250 TCGProfile prof = {};
4251 const TCGProfile *s;
4252 int64_t tb_count;
4253 int64_t tb_div_count;
4254 int64_t tot;
4256 tcg_profile_snapshot_counters(&prof);
4257 s = &prof;
4258 tb_count = s->tb_count;
4259 tb_div_count = tb_count ? tb_count : 1;
4260 tot = s->interm_time + s->code_time;
4262 qemu_printf("JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
4263 tot, tot / 2.4e9);
4264 qemu_printf("translated TBs %" PRId64 " (aborted=%" PRId64
4265 " %0.1f%%)\n",
4266 tb_count, s->tb_count1 - tb_count,
4267 (double)(s->tb_count1 - s->tb_count)
4268 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
4269 qemu_printf("avg ops/TB %0.1f max=%d\n",
4270 (double)s->op_count / tb_div_count, s->op_count_max);
4271 qemu_printf("deleted ops/TB %0.2f\n",
4272 (double)s->del_op_count / tb_div_count);
4273 qemu_printf("avg temps/TB %0.2f max=%d\n",
4274 (double)s->temp_count / tb_div_count, s->temp_count_max);
4275 qemu_printf("avg host code/TB %0.1f\n",
4276 (double)s->code_out_len / tb_div_count);
4277 qemu_printf("avg search data/TB %0.1f\n",
4278 (double)s->search_out_len / tb_div_count);
4280 qemu_printf("cycles/op %0.1f\n",
4281 s->op_count ? (double)tot / s->op_count : 0);
4282 qemu_printf("cycles/in byte %0.1f\n",
4283 s->code_in_len ? (double)tot / s->code_in_len : 0);
4284 qemu_printf("cycles/out byte %0.1f\n",
4285 s->code_out_len ? (double)tot / s->code_out_len : 0);
4286 qemu_printf("cycles/search byte %0.1f\n",
4287 s->search_out_len ? (double)tot / s->search_out_len : 0);
4288 if (tot == 0) {
4289 tot = 1;
4290 }
4291 qemu_printf(" gen_interm time %0.1f%%\n",
4292 (double)s->interm_time / tot * 100.0);
4293 qemu_printf(" gen_code time %0.1f%%\n",
4294 (double)s->code_time / tot * 100.0);
4295 qemu_printf("optim./code time %0.1f%%\n",
4296 (double)s->opt_time / (s->code_time ? s->code_time : 1)
4297 * 100.0);
4298 qemu_printf("liveness/code time %0.1f%%\n",
4299 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
4300 qemu_printf("cpu_restore count %" PRId64 "\n",
4301 s->restore_count);
4302 qemu_printf(" avg cycles %0.1f\n",
4303 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
4304 }
4305 #else
4306 void tcg_dump_info(void)
4307 {
4308 qemu_printf("[TCG profiler not compiled]\n");
4309 }
4310 #endif
4312 #ifdef ELF_HOST_MACHINE
4313 /* In order to use this feature, the backend needs to do three things:
4315 (1) Define ELF_HOST_MACHINE to indicate both the value to put
4316 into the ELF image and that the feature is supported.
4318 (2) Define tcg_register_jit. This should create a buffer containing
4319 the contents of a .debug_frame section that describes the post-
4320 prologue unwind info for the tcg machine.
4322 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
4323 */
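/*
 * For illustration only, a hypothetical backend's tcg_register_jit()
 * (defined in its tcg-target.c.inc, not in this file) typically wraps a
 * target-specific .debug_frame template and forwards it here, roughly:
 *
 *     void tcg_register_jit(const void *buf, size_t buf_size)
 *     {
 *         static const DebugFrame debug_frame = {
 *             ... CIE/FDE describing the prologue's frame layout ...
 *         };
 *         tcg_register_jit_int(buf, buf_size, &debug_frame,
 *                              sizeof(debug_frame));
 *     }
 *
 * where "DebugFrame" stands for whatever frame structure that backend
 * declares; see an existing backend for a real definition.
 */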
4325 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
4326 typedef enum {
4327 JIT_NOACTION = 0,
4328 JIT_REGISTER_FN,
4329 JIT_UNREGISTER_FN
4330 } jit_actions_t;
4332 struct jit_code_entry {
4333 struct jit_code_entry *next_entry;
4334 struct jit_code_entry *prev_entry;
4335 const void *symfile_addr;
4336 uint64_t symfile_size;
4337 };
4339 struct jit_descriptor {
4340 uint32_t version;
4341 uint32_t action_flag;
4342 struct jit_code_entry *relevant_entry;
4343 struct jit_code_entry *first_entry;
4344 };
4346 void __jit_debug_register_code(void) __attribute__((noinline));
4347 void __jit_debug_register_code(void)
4348 {
4349 asm("");
4350 }
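/*
 * GDB plants a breakpoint in __jit_debug_register_code(); the noinline
 * attribute and the empty asm keep the function from being inlined or
 * optimized away so that the breakpoint can actually be hit.
 */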
4352 /* Must statically initialize the version, because GDB may check
4353 the version before we can set it. */
4354 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
4356 /* End GDB interface. */
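/*
 * find_string() returns the offset of str within the string table; the
 * table starts with a NUL byte, so real names begin at offset 1. There
 * is no termination check, so it must only be called with strings that
 * are known to be present in img->str below.
 */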
4358 static int find_string(const char *strtab, const char *str)
4359 {
4360 const char *p = strtab + 1;
4362 while (1) {
4363 if (strcmp(p, str) == 0) {
4364 return p - strtab;
4365 }
4366 p += strlen(p) + 1;
4367 }
4368 }
4370 static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
4371 const void *debug_frame,
4372 size_t debug_frame_size)
4373 {
4374 struct __attribute__((packed)) DebugInfo {
4375 uint32_t len;
4376 uint16_t version;
4377 uint32_t abbrev;
4378 uint8_t ptr_size;
4379 uint8_t cu_die;
4380 uint16_t cu_lang;
4381 uintptr_t cu_low_pc;
4382 uintptr_t cu_high_pc;
4383 uint8_t fn_die;
4384 char fn_name[16];
4385 uintptr_t fn_low_pc;
4386 uintptr_t fn_high_pc;
4387 uint8_t cu_eoc;
4388 };
4390 struct ElfImage {
4391 ElfW(Ehdr) ehdr;
4392 ElfW(Phdr) phdr;
4393 ElfW(Shdr) shdr[7];
4394 ElfW(Sym) sym[2];
4395 struct DebugInfo di;
4396 uint8_t da[24];
4397 char str[80];
4398 };
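/*
 * The symbol file handed to GDB is this single struct (ELF header,
 * program and section headers, a minimal DWARF .debug_info and
 * .debug_abbrev, a two-entry symbol table and a string table), with the
 * backend's .debug_frame copied immediately after it; that is why
 * shdr[4] below sets sh_offset to sizeof(struct ElfImage).
 */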
4400 struct ElfImage *img;
4402 static const struct ElfImage img_template = {
4403 .ehdr = {
4404 .e_ident[EI_MAG0] = ELFMAG0,
4405 .e_ident[EI_MAG1] = ELFMAG1,
4406 .e_ident[EI_MAG2] = ELFMAG2,
4407 .e_ident[EI_MAG3] = ELFMAG3,
4408 .e_ident[EI_CLASS] = ELF_CLASS,
4409 .e_ident[EI_DATA] = ELF_DATA,
4410 .e_ident[EI_VERSION] = EV_CURRENT,
4411 .e_type = ET_EXEC,
4412 .e_machine = ELF_HOST_MACHINE,
4413 .e_version = EV_CURRENT,
4414 .e_phoff = offsetof(struct ElfImage, phdr),
4415 .e_shoff = offsetof(struct ElfImage, shdr),
4416 .e_ehsize = sizeof(ElfW(Shdr)),
4417 .e_phentsize = sizeof(ElfW(Phdr)),
4418 .e_phnum = 1,
4419 .e_shentsize = sizeof(ElfW(Shdr)),
4420 .e_shnum = ARRAY_SIZE(img->shdr),
4421 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
4422 #ifdef ELF_HOST_FLAGS
4423 .e_flags = ELF_HOST_FLAGS,
4424 #endif
4425 #ifdef ELF_OSABI
4426 .e_ident[EI_OSABI] = ELF_OSABI,
4427 #endif
4428 },
4429 .phdr = {
4430 .p_type = PT_LOAD,
4431 .p_flags = PF_X,
4432 },
4433 .shdr = {
4434 [0] = { .sh_type = SHT_NULL },
4435 /* Trick: The contents of code_gen_buffer are not present in
4436 this fake ELF file; that got allocated elsewhere. Therefore
4437 we mark .text as SHT_NOBITS (similar to .bss) so that readers
4438 will not look for contents. We can record any address. */
4439 [1] = { /* .text */
4440 .sh_type = SHT_NOBITS,
4441 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
4442 },
4443 [2] = { /* .debug_info */
4444 .sh_type = SHT_PROGBITS,
4445 .sh_offset = offsetof(struct ElfImage, di),
4446 .sh_size = sizeof(struct DebugInfo),
4447 },
4448 [3] = { /* .debug_abbrev */
4449 .sh_type = SHT_PROGBITS,
4450 .sh_offset = offsetof(struct ElfImage, da),
4451 .sh_size = sizeof(img->da),
4452 },
4453 [4] = { /* .debug_frame */
4454 .sh_type = SHT_PROGBITS,
4455 .sh_offset = sizeof(struct ElfImage),
4456 },
4457 [5] = { /* .symtab */
4458 .sh_type = SHT_SYMTAB,
4459 .sh_offset = offsetof(struct ElfImage, sym),
4460 .sh_size = sizeof(img->sym),
4461 .sh_info = 1,
4462 .sh_link = ARRAY_SIZE(img->shdr) - 1,
4463 .sh_entsize = sizeof(ElfW(Sym)),
4464 },
4465 [6] = { /* .strtab */
4466 .sh_type = SHT_STRTAB,
4467 .sh_offset = offsetof(struct ElfImage, str),
4468 .sh_size = sizeof(img->str),
4469 },
4470 },
4471 .sym = {
4472 [1] = { /* code_gen_buffer */
4473 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
4474 .st_shndx = 1,
4475 },
4476 },
4477 .di = {
4478 .len = sizeof(struct DebugInfo) - 4,
4479 .version = 2,
4480 .ptr_size = sizeof(void *),
4481 .cu_die = 1,
4482 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
4483 .fn_die = 2,
4484 .fn_name = "code_gen_buffer"
4485 },
4486 .da = {
4487 1, /* abbrev number (the cu) */
4488 0x11, 1, /* DW_TAG_compile_unit, has children */
4489 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
4490 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
4491 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
4492 0, 0, /* end of abbrev */
4493 2, /* abbrev number (the fn) */
4494 0x2e, 0, /* DW_TAG_subprogram, no children */
4495 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
4496 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
4497 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
4498 0, 0, /* end of abbrev */
4499 0 /* no more abbrev */
4500 },
4501 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
4502 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
4503 };
4505 /* We only need a single jit entry; statically allocate it. */
4506 static struct jit_code_entry one_entry;
4508 uintptr_t buf = (uintptr_t)buf_ptr;
4509 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
4510 DebugFrameHeader *dfh;
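/*
 * The remainder of this function patches the template copy in place:
 * the program header, .text section and symbol receive the real buffer
 * address and size, the DWARF compile-unit and subprogram ranges are
 * set to cover the whole buffer, and the FDE of the copied .debug_frame
 * is widened to span it as well.
 */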
4512 img = g_malloc(img_size);
4513 *img = img_template;
4515 img->phdr.p_vaddr = buf;
4516 img->phdr.p_paddr = buf;
4517 img->phdr.p_memsz = buf_size;
4519 img->shdr[1].sh_name = find_string(img->str, ".text");
4520 img->shdr[1].sh_addr = buf;
4521 img->shdr[1].sh_size = buf_size;
4523 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
4524 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
4526 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
4527 img->shdr[4].sh_size = debug_frame_size;
4529 img->shdr[5].sh_name = find_string(img->str, ".symtab");
4530 img->shdr[6].sh_name = find_string(img->str, ".strtab");
4532 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
4533 img->sym[1].st_value = buf;
4534 img->sym[1].st_size = buf_size;
4536 img->di.cu_low_pc = buf;
4537 img->di.cu_high_pc = buf + buf_size;
4538 img->di.fn_low_pc = buf;
4539 img->di.fn_high_pc = buf + buf_size;
4541 dfh = (DebugFrameHeader *)(img + 1);
4542 memcpy(dfh, debug_frame, debug_frame_size);
4543 dfh->fde.func_start = buf;
4544 dfh->fde.func_len = buf_size;
4546 #ifdef DEBUG_JIT
4547 /* Define DEBUG_JIT to dump the generated ELF image to /tmp/qemu.jit,
4548 where it can be inspected with readelf, objdump, or similar tools. */
4549 {
4550 FILE *f = fopen("/tmp/qemu.jit", "w+b");
4551 if (f) {
4552 if (fwrite(img, img_size, 1, f) != img_size) {
4553 /* Avoid stupid unused return value warning for fwrite. */
4554 }
4555 fclose(f);
4556 }
4557 }
4558 #endif
4560 one_entry.symfile_addr = img;
4561 one_entry.symfile_size = img_size;
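/*
 * Registration handshake expected by GDB: point the descriptor at the
 * new entry, set the action flag, then call __jit_debug_register_code()
 * so that a debugger with a breakpoint there can pick up the new symbol
 * file.
 */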
4563 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
4564 __jit_debug_descriptor.relevant_entry = &one_entry;
4565 __jit_debug_descriptor.first_entry = &one_entry;
4566 __jit_debug_register_code();
4567 }
4568 #else
4569 /* No support for the feature. Provide the entry point expected by exec.c,
4570 and implement the internal function we declared earlier. */
4572 static void tcg_register_jit_int(const void *buf, size_t size,
4573 const void *debug_frame,
4574 size_t debug_frame_size)
4575 {
4576 }
4578 void tcg_register_jit(const void *buf, size_t buf_size)
4579 {
4580 }
4581 #endif /* ELF_HOST_MACHINE */
4583 #if !TCG_TARGET_MAYBE_vec
4584 void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
4585 {
4586 g_assert_not_reached();
4587 }
4588 #endif