/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"
/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/cacheflush.h"
/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS

#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif
#include "elf.h"
#include "exec/log.h"
#include "tcg-internal.h"

#ifdef CONFIG_TCG_INTERPRETER
#include <ffi.h>
#endif
/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
#ifdef CONFIG_TCG_INTERPRETER
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         ffi_cif *cif);
#else
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target);
#endif
static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env cpu_env = 0;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
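/*
 * Illustrative sketch, not part of this file: a backend whose
 * TCG_TARGET_INSN_UNIT_SIZE is 4 (e.g. a fixed-width RISC host) emits
 * one instruction per tcg_out32() call from its tcg_out_op(), roughly:
 *
 *     tcg_out32(s, encode_add(ret, arg1, arg2));
 *
 * where encode_add is a hypothetical encoder; the real encoders live in
 * the per-host tcg/<host>/tcg-target.c.inc files.
 */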
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}
static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}
TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}
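/*
 * Typical frontend usage of labels (sketch, using the tcg-op.h API):
 *
 *     TCGLabel *l = gen_new_label();
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, val, 0, l);
 *     ...                                // skipped when val == 0
 *     gen_set_label(l);
 *
 * References recorded by tcg_out_reloc() above are patched in
 * tcg_resolve_relocs() below, once the label's final address is known.
 */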
static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
}
/* Signal overflow, starting over with fewer guest insns. */
static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}
#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F
/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_N1_I2
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_N1_I2
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
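/*
 * Worked example: an entry C_O1_I2(r, r, ri) in tcg-target-con-set.h
 * expands to the enumerator c_o1_i2_r_r_ri above, to the array element
 * { .args_ct_str = { "r", "r", "ri" } } here, and is the value a
 * backend's tcg_target_op_def() returns for opcodes with one register
 * output and two inputs, the second optionally a constant.
 */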
/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"
static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
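/*
 * Orientation note: most callers use the inline tcg_malloc() fast path
 * declared in tcg.h, which bump-allocates from s->pool_cur and only
 * calls tcg_malloc_internal() when the current chunk is exhausted,
 * roughly (sketch; the exact rounding lives in tcg.h):
 *
 *     uint8_t *p = s->pool_cur, *end = p + aligned_size;
 *     if (end > s->pool_end) {
 *         return tcg_malloc_internal(s, aligned_size);
 *     }
 *     s->pool_cur = end;
 *     return p;
 */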
void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;
#ifdef CONFIG_TCG_INTERPRETER
static GHashTable *ffi_table;

static ffi_type * const typecode_to_ffi[8] = {
    [dh_typecode_void] = &ffi_type_void,
    [dh_typecode_i32]  = &ffi_type_uint32,
    [dh_typecode_s32]  = &ffi_type_sint32,
    [dh_typecode_i64]  = &ffi_type_uint64,
    [dh_typecode_s64]  = &ffi_type_sint64,
    [dh_typecode_ptr]  = &ffi_type_pointer,
};
#endif
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }
    /* Register helpers. */
    /* Use g_direct_hash/equal for direct pointer comparisons on func. */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }
#ifdef CONFIG_TCG_INTERPRETER
    /* g_direct_hash/equal for direct comparisons on uint32_t. */
    ffi_table = g_hash_table_new(NULL, NULL);
    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        struct {
            ffi_cif cif;
            ffi_type *args[];
        } *ca;
        uint32_t typemask = all_helpers[i].typemask;
        gpointer hash = (gpointer)(uintptr_t)typemask;
        ffi_status status;
        int nargs;

        if (g_hash_table_lookup(ffi_table, hash)) {
            continue;
        }

        /* Ignoring the return type, find the last non-zero field. */
        nargs = 32 - clz32(typemask >> 3);
        nargs = DIV_ROUND_UP(nargs, 3);

        ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
        ca->cif.rtype = typecode_to_ffi[typemask & 7];
        ca->cif.nargs = nargs;

        if (nargs != 0) {
            ca->cif.arg_types = ca->args;
            /* Use a separate index so as not to clobber the outer loop's i. */
            for (int j = 0; j < nargs; ++j) {
                int typecode = extract32(typemask, (j + 1) * 3, 3);
                ca->args[j] = typecode_to_ffi[typecode];
            }
        }

        status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                              ca->cif.rtype, ca->cif.arg_types);
        assert(status == FFI_OK);

        g_hash_table_insert(ffi_table, hash, (gpointer)&ca->cif);
    }
#endif
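    /*
     * Note on the encoding assumed above: typemask packs one 3-bit
     * dh_typecode_* field per slot; bits [2:0] describe the return
     * value and bits [3*(i+1)+2 : 3*(i+1)] describe argument i.  So a
     * helper uint64_t f(uint32_t a, uint64_t b) would carry
     * dh_typecode_i64 | (dh_typecode_i32 << 3) | (dh_typecode_i64 << 6).
     */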
    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }
    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
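/*
 * Resulting layout (sketch):
 *
 *     [TranslationBlock][pad to icache line][translated code ...]
 *
 * i.e. the TB descriptor and its code start on different cache lines,
 * which is the property the ROUND_UP arithmetic above establishes.
 */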
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue. */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry. */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

    tcg_region_prologue_set(s);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(s->code_gen_ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(s->code_gen_ptr, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
#endif

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(tcg_code_gen_epilogue != NULL);
    }
#endif
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality. */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}
static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}
static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers. */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type. */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->kind == kind);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->kind = kind;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->kind = kind;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->kind = kind;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}
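/*
 * Frontends reach this through the tcg.h wrappers, e.g. (sketch):
 *
 *     TCGv_i32 t = tcg_temp_new_i32();        // temp_local == false
 *     TCGv_i64 u = tcg_temp_local_new_i64();  // temp_local == true
 *
 * TEMP_NORMAL temps die at basic-block boundaries, while TEMP_LOCAL
 * temps are synced to memory instead; see la_bb_end()/la_bb_sync()
 * later in this file.
 */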
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}
/* Create a new temp of the same type as an existing temp. */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

    /* In order to simplify users of tcg_constant_*, silently ignore free. */
    if (ts->kind == TEMP_CONST) {
        return;
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->kind < TEMP_GLOBAL);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
    set_bit(idx, s->free_temps[k].l);
}
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works.  Actual uses will
             * truncate the value to the low part.
             */
            ts->val = val;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->val = val >> 32;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
        }
        g_hash_table_insert(h, &ts->val, ts);
    }

    return ts;
}
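/*
 * Usage sketch: frontends obtain interned constants via the tcg.h
 * wrappers, e.g.
 *
 *     TCGv_i32 four = tcg_constant_i32(4);
 *
 * Each (type, value) pair is created once per context and hashed here;
 * such temps must never be freed, which is why tcg_temp_free_internal()
 * silently ignores TEMP_CONST above.
 */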
TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}
TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}
TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_qemu_st8_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
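/*
 * Example: generic expansion code can consult this predicate before
 * emitting an optional opcode, e.g. emitting ctpop_i32 directly when it
 * returns true for INDEX_op_ctpop_i32 (equivalently, when the
 * TCG_TARGET_HAS_ctpop_i32 flag is set) and otherwise expanding an
 * equivalent instruction sequence.
 */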
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned typemask;
    const TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    typemask = info->typemask;

#ifdef CONFIG_PLUGIN
    /* detect non-plugin helpers */
    if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) {
        tcg_ctx->plugin_insn->calls_helpers = true;
    }
#endif

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them. */
    int orig_typemask = typemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    typemask = 0;
    for (i = real_args = 0; i < nargs; ++i) {
        int argtype = extract32(orig_typemask, (i + 1) * 3, 3);
        bool is_64bit = (argtype & ~1) == dh_typecode_i64;

        if (is_64bit) {
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            TCGv_i32 h = tcg_temp_new_i32();
            TCGv_i32 l = tcg_temp_new_i32();
            tcg_gen_extr_i64_i32(l, h, orig);
            split_args[real_args++] = tcgv_i32_temp(h);
            typemask |= dh_typecode_i32 << (real_args * 3);
            split_args[real_args++] = tcgv_i32_temp(l);
            typemask |= dh_typecode_i32 << (real_args * 3);
        } else {
            split_args[real_args++] = args[i];
            typemask |= argtype << (real_args * 3);
        }
    }
    nargs = real_args;
    args = split_args;
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int argtype = extract32(typemask, (i + 1) * 3, 3);
        bool is_32bit = (argtype & ~1) == dh_typecode_i32;
        bool is_signed = argtype & 1;

        if (is_32bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if ((typemask & 6) == dh_typecode_i64) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below. */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (typemask & 6) == dh_typecode_i64) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int argtype = extract32(typemask, (i + 1) * 3, 3);
        bool is_64bit = (argtype & ~1) == dh_typecode_i64;
        bool want_align = false;

#if defined(CONFIG_TCG_INTERPRETER)
        /*
         * Align all arguments, so that they land in predictable places
         * for passing off to ffi_call.
         */
        want_align = true;
#elif defined(TCG_TARGET_CALL_ALIGN_ARGS)
        /* Some targets want aligned 64 bit args */
        want_align = is_64bit;
#endif

        if (TCG_TARGET_REG_BITS < 64 && want_align && (real_args & 1)) {
            op->args[pi++] = TCG_CALL_DUMMY_ARG;
            real_args++;
        }

        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
            /*
             * If stack grows up, then we will be placing successive
             * arguments at lower addresses, which means we need to
             * reverse the order compared to how we would normally
             * treat either big or little-endian.  For those arguments
             * that will wind up in registers, this still works for
             * HPPA (the only current STACK_GROWSUP target) since the
             * argument registers are *also* allocated in decreasing
             * order.  If another such target is added, this logic may
             * have to get more complicated to differentiate between
             * stack arguments and register arguments.
             */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = (uintptr_t)info;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow. */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above. */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int argtype = extract32(orig_typemask, (i + 1) * 3, 3);
        bool is_64bit = (argtype & ~1) == dh_typecode_i64;

        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if ((orig_typemask & 6) == dh_typecode_i64) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create. */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int argtype = extract32(typemask, (i + 1) * 3, 3);
        bool is_32bit = (argtype & ~1) == dh_typecode_i32;

        if (is_32bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
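/*
 * Worked example for the alignment logic above (sketch): on a 32-bit
 * host that defines TCG_TARGET_CALL_ALIGN_ARGS, a helper taking
 * (i32, i64) reaches the i64 with real_args == 1, so a
 * TCG_CALL_DUMMY_ARG is emitted first and the 64-bit pair then lands
 * in an even/odd register pair or an aligned stack slot.
 */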
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;

    for (i = 0, n = s->nb_temps; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        TCGTempVal val = TEMP_VAL_MEM;

        switch (ts->kind) {
        case TEMP_CONST:
            val = TEMP_VAL_CONST;
            break;
        case TEMP_FIXED:
            val = TEMP_VAL_REG;
            break;
        case TEMP_GLOBAL:
            break;
        case TEMP_NORMAL:
            val = TEMP_VAL_DEAD;
            /* fall through */
        case TEMP_LOCAL:
            ts->mem_allocated = 0;
            break;
        default:
            g_assert_not_reached();
        }
        ts->val_type = val;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    switch (ts->kind) {
    case TEMP_FIXED:
    case TEMP_GLOBAL:
        pstrcpy(buf, buf_size, ts->name);
        break;
    case TEMP_LOCAL:
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
        break;
    case TEMP_NORMAL:
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
        break;
    case TEMP_CONST:
        switch (ts->type) {
        case TCG_TYPE_I32:
            snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
            break;
#if TCG_TARGET_REG_BITS > 32
        case TCG_TYPE_I64:
            snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
            break;
#endif
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            snprintf(buf, buf_size, "v%d$0x%" PRIx64,
                     64 << (ts->type - TCG_TYPE_V64), ts->val);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};
[] =
1765 static const char * const alignment_name
[(MO_AMASK
>> MO_ASHIFT
) + 1] = {
1766 #ifdef TARGET_ALIGNED_ONLY
1767 [MO_UNALN
>> MO_ASHIFT
] = "un+",
1768 [MO_ALIGN
>> MO_ASHIFT
] = "",
1770 [MO_UNALN
>> MO_ASHIFT
] = "",
1771 [MO_ALIGN
>> MO_ASHIFT
] = "al+",
1773 [MO_ALIGN_2
>> MO_ASHIFT
] = "al2+",
1774 [MO_ALIGN_4
>> MO_ASHIFT
] = "al4+",
1775 [MO_ALIGN_8
>> MO_ASHIFT
] = "al8+",
1776 [MO_ALIGN_16
>> MO_ASHIFT
] = "al16+",
1777 [MO_ALIGN_32
>> MO_ASHIFT
] = "al32+",
1778 [MO_ALIGN_64
>> MO_ASHIFT
] = "al64+",
1781 static const char bswap_flag_name
[][6] = {
1782 [TCG_BSWAP_IZ
] = "iz",
1783 [TCG_BSWAP_OZ
] = "oz",
1784 [TCG_BSWAP_OS
] = "os",
1785 [TCG_BSWAP_IZ
| TCG_BSWAP_OZ
] = "iz,oz",
1786 [TCG_BSWAP_IZ
| TCG_BSWAP_OS
] = "iz,os",
static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}
static void tcg_dump_ops(TCGContext *s, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += qemu_log("\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            const TCGHelperInfo *info = tcg_call_info(op);
            void *func = tcg_call_func(op);

            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            col += qemu_log(" %s ", def->name);

            /*
             * Print the function name from TCGHelperInfo, if available.
             * Note that plugins have a template function for the info,
             * but the actual function pointer comes from the plugin.
             */
            if (func == info->func) {
                col += qemu_log("%s", info->name);
            } else {
                col += qemu_log("plugin(%p)", func);
            }

            col += qemu_log(",$0x%x,$%d", info->flags, nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
            case INDEX_op_cmpsel_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st8_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    MemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            case INDEX_op_bswap16_i32:
            case INDEX_op_bswap16_i64:
            case INDEX_op_bswap32_i32:
            case INDEX_op_bswap32_i64:
            case INDEX_op_bswap64_i64:
                {
                    TCGArg flags = op->args[k];
                    const char *name = NULL;

                    if (flags < ARRAY_SIZE(bswap_flag_name)) {
                        name = bswap_flag_name[flags];
                    }
                    if (name) {
                        col += qemu_log(",%s", name);
                    } else {
                        col += qemu_log(",$0x%" TCG_PRIlx, flags);
                    }
                    i = k = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            QemuLogFile *logfile;

            rcu_read_lock();
            logfile = qatomic_rcu_read(&qemu_logfile);
            if (logfile) {
                for (; col < 40; ++col) {
                    putc(' ', logfile->fd);
                }
            }
            rcu_read_unlock();
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = op->output_pref[i];

                if (i == 0) {
                    qemu_log("  pref=");
                } else {
                    qemu_log(",");
                }
                if (set == 0) {
                    qemu_log("none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    qemu_log("all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    qemu_log("%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    qemu_log("%#x", (uint32_t)set);
                } else {
                    qemu_log("%#" PRIx64, (uint64_t)set);
                }
            }
        }

        qemu_log("\n");
    }
}
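/*
 * Illustrative output line (format only, not real data):
 *
 *  ---- 000000000040052e
 *   mov_i32 tmp0,var                        dead: 1  pref=all
 *
 * The sync/dead annotations and the pref column appear only when
 * liveness data or output preferences are present, as computed above.
 */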
/* we give more priority to constraints with less registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct = &def->args_ct[k];
    int n;

    if (arg_ct->oalias) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        n = ctpop64(arg_ct->regs);
    }
    return TCG_TARGET_NB_REGS - n + 1;
}
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j;
    TCGArgConstraint *a = def->args_ct;

    for (i = 0; i < n; i++) {
        a[start + i].sort_index = start + i;
    }
    if (n <= 1) {
        return;
    }
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            int p1 = get_constraint_priority(def, a[start + i].sort_index);
            int p2 = get_constraint_priority(def, a[start + j].sort_index);
            if (p1 < p2) {
                int tmp = a[start + i].sort_index;
                a[start + i].sort_index = a[start + j].sort_index;
                a[start + j].sort_index = tmp;
            }
        }
    }
}
*s
)
2084 for (op
= 0; op
< NB_OPS
; op
++) {
2085 TCGOpDef
*def
= &tcg_op_defs
[op
];
2086 const TCGTargetOpDef
*tdefs
;
2089 if (def
->flags
& TCG_OPF_NOT_PRESENT
) {
2093 nb_args
= def
->nb_iargs
+ def
->nb_oargs
;
2099 * Macro magic should make it impossible, but double-check that
2100 * the array index is in range. Since the signness of an enum
2101 * is implementation defined, force the result to unsigned.
2103 unsigned con_set
= tcg_target_op_def(op
);
2104 tcg_debug_assert(con_set
< ARRAY_SIZE(constraint_sets
));
2105 tdefs
= &constraint_sets
[con_set
];
2107 for (i
= 0; i
< nb_args
; i
++) {
2108 const char *ct_str
= tdefs
->args_ct_str
[i
];
2109 /* Incomplete TCGTargetOpDef entry. */
2110 tcg_debug_assert(ct_str
!= NULL
);
2112 while (*ct_str
!= '\0') {
2116 int oarg
= *ct_str
- '0';
2117 tcg_debug_assert(ct_str
== tdefs
->args_ct_str
[i
]);
2118 tcg_debug_assert(oarg
< def
->nb_oargs
);
2119 tcg_debug_assert(def
->args_ct
[oarg
].regs
!= 0);
2120 def
->args_ct
[i
] = def
->args_ct
[oarg
];
2121 /* The output sets oalias. */
2122 def
->args_ct
[oarg
].oalias
= true;
2123 def
->args_ct
[oarg
].alias_index
= i
;
2124 /* The input sets ialias. */
2125 def
->args_ct
[i
].ialias
= true;
2126 def
->args_ct
[i
].alias_index
= oarg
;
2131 def
->args_ct
[i
].newreg
= true;
2135 def
->args_ct
[i
].ct
|= TCG_CT_CONST
;
2139 /* Include all of the target-specific constraints. */
2142 #define CONST(CASE, MASK) \
2143 case CASE: def->args_ct[i].ct |= MASK; ct_str++; break;
2144 #define REGS(CASE, MASK) \
2145 case CASE: def->args_ct[i].regs |= MASK; ct_str++; break;
2147 #include "tcg-target-con-str.h"
2152 /* Typo in TCGTargetOpDef constraint. */
2153 g_assert_not_reached();
2158 /* TCGTargetOpDef entry with too much information? */
2159 tcg_debug_assert(i
== TCG_MAX_OP_ARGS
|| tdefs
->args_ct_str
[i
] == NULL
);
2161 /* sort the constraints (XXX: this is just an heuristic) */
2162 sort_constraints(def
, 0, def
->nb_oargs
);
2163 sort_constraints(def
, def
->nb_oargs
, def
->nb_iargs
);
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    TCGLabel *label;

    switch (op->opc) {
    case INDEX_op_br:
        label = arg_label(op->args[0]);
        label->refs--;
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        label = arg_label(op->args[3]);
        label->refs--;
        break;
    case INDEX_op_brcond2_i32:
        label = arg_label(op->args[5]);
        label->refs--;
        break;
    default:
        break;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;

#ifdef CONFIG_PROFILER
    qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}
void tcg_remove_ops_after(TCGOp *op)
{
    TCGContext *s = tcg_ctx;

    while (true) {
        TCGOp *last = tcg_last_op();
        if (last == op) {
            return;
        }
        tcg_op_remove(s, last);
    }
}
*tcg_op_alloc(TCGOpcode opc
)
2213 TCGContext
*s
= tcg_ctx
;
2216 if (likely(QTAILQ_EMPTY(&s
->free_ops
))) {
2217 op
= tcg_malloc(sizeof(TCGOp
));
2219 op
= QTAILQ_FIRST(&s
->free_ops
);
2220 QTAILQ_REMOVE(&s
->free_ops
, op
, link
);
2222 memset(op
, 0, offsetof(TCGOp
, link
));
TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}
*tcg_op_insert_before(TCGContext
*s
, TCGOp
*old_op
, TCGOpcode opc
)
2238 TCGOp
*new_op
= tcg_op_alloc(opc
);
2239 QTAILQ_INSERT_BEFORE(old_op
, new_op
, link
);
2243 TCGOp
*tcg_op_insert_after(TCGContext
*s
, TCGOp
*old_op
, TCGOpcode opc
)
2245 TCGOp
*new_op
= tcg_op_alloc(opc
);
2246 QTAILQ_INSERT_AFTER(&s
->ops
, old_op
, new_op
, link
);
/* Reachable analysis : remove unreachable code. */
static void reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);
            if (label->refs == 0) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that will be, and there is
                 * little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again. */
                dead = false;
                remove = false;

                /*
                 * Optimization can fold conditional branches to unconditional.
                 * If we find a label with one reference which is preceded by
                 * an unconditional branch to it, remove both.  This needed to
                 * wait until the dead code in between them was removed.
                 */
                if (label->refs == 1) {
                    TCGOp *op_prev = QTAILQ_PREV(op, link);
                    if (op_prev->opc == INDEX_op_br &&
                        label == arg_label(op_prev->args[0])) {
                        tcg_op_remove(s, op_prev);
                        remove = true;
                    }
                }
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead. */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions. */
            if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind. */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}
#define TS_DEAD  1
#define TS_MEM   2

#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
/* For liveness_pass_1, the register preferences for a given temp. */
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
{
    return ts->state_ptr;
}
/* For liveness_pass_1, reset the preferences for a given temp to the
 * maximal regset for its type.
 */
static inline void la_reset_pref(TCGTemp *ts)
{
    *la_temp_pref(ts)
        = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
}
/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void la_func_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void la_bb_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_FIXED:
        case TEMP_GLOBAL:
        case TEMP_LOCAL:
            state = TS_DEAD | TS_MEM;
            break;
        case TEMP_NORMAL:
        case TEMP_CONST:
            state = TS_DEAD;
            break;
        default:
            g_assert_not_reached();
        }
        ts->state = state;
        la_reset_pref(ts);
    }
}
/* liveness analysis: sync globals back to memory.  */
static void la_global_sync(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; ++i) {
        int state = s->temps[i].state;
        s->temps[i].state = state | TS_MEM;
        if (state == TS_DEAD) {
            /* If the global was previously dead, reset prefs.  */
            la_reset_pref(&s->temps[i]);
        }
    }
}
/*
 * liveness analysis: conditional branch: all temps are dead,
 * globals and local temps should be synced.
 */
static void la_bb_sync(TCGContext *s, int ng, int nt)
{
    la_global_sync(s, ng);

    for (int i = ng; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_LOCAL:
            state = ts->state;
            ts->state = state | TS_MEM;
            if (state != TS_DEAD) {
                continue;
            }
            break;
        case TEMP_NORMAL:
            s->temps[i].state = TS_DEAD;
            break;
        case TEMP_CONST:
            continue;
        default:
            g_assert_not_reached();
        }
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: sync globals back to memory and kill.  */
static void la_global_kill(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; i++) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: note live globals crossing calls.  */
static void la_cross_call(TCGContext *s, int nt)
{
    TCGRegSet mask = ~tcg_target_call_clobber_regs;
    int i;

    for (i = 0; i < nt; i++) {
        TCGTemp *ts = &s->temps[i];
        if (!(ts->state & TS_DEAD)) {
            TCGRegSet *pset = la_temp_pref(ts);
            TCGRegSet set = *pset;

            set &= mask;
            /* If the combination is not possible, restart.  */
            if (set == 0) {
                set = tcg_target_available_regs[ts->type] & mask;
            }
            *pset = set;
        }
    }
}
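
/*
 * Example (illustrative): if a temp live across a call currently
 * prefers a call-clobbered register such as the first argument
 * register, masking with ~tcg_target_call_clobber_regs steers the
 * allocator toward a call-saved register and so avoids a spill and
 * reload around the call.  When no call-saved register exists for the
 * type, the preference set is rebuilt from tcg_target_available_regs.
 */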
/* Liveness analysis: update the opc_arg_life array to tell if a
   given input argument is dead.  Instructions updating dead
   temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB.  */
    la_func_end(s, nb_globals, nb_temps);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;
                int nb_call_regs;

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);
                call_flags = tcg_call_flags(op);

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead.  */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);

                    /* Not used -- it will be tcg_target_call_oarg_regs[i].  */
                    op->output_pref[i] = 0;
                }

                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper.  */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts && ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs.  */
                la_cross_call(s, nb_temps);

                nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);

                /* Input arguments are live for preceding opcodes.  */
                for (i = 0; i < nb_iargs; i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts && ts->state & TS_DEAD) {
                        /* For those arguments that die, and will be allocated
                         * in registers, clear the register set for that arg,
                         * to be filled in below.  For args that will be on
                         * the stack, reset to any available reg.
                         */
                        *la_temp_pref(ts)
                            = (i < nb_call_regs ? 0 :
                               tcg_target_available_regs[ts->type]);
                        ts->state &= ~TS_DEAD;
                    }
                }

                /* For each input argument, add its input register to prefs.
                   If a temp is used once, this produces a single set bit.  */
                for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts) {
                        tcg_regset_set_reg(*la_temp_pref(ts),
                                           tcg_target_call_iarg_regs[i]);
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit.  */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end.  */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live.  */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            /* fall through */
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead.  */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live.  */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed.  */
                op->output_pref[i] = *la_temp_pref(ts);

                /* Output args are dead.  */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update.  */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_COND_BRANCH) {
                la_bb_sync(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type.  */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand.  */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward.  */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->regs;
                    if (ct->ialias) {
                        set &= op->output_pref[ct->alias_index];
                    }
                    /* If the combination is not possible, restart.  */
                    if (set == 0) {
                        set = ct->regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}
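
/*
 * Worked example (illustrative) of the add2/sub2 narrowing above:
 *
 *     add2_i32  lo, hi, al, ah, bl, bh
 *
 * with 'hi' dead but 'lo' live becomes, after the in-place rewrite,
 *
 *     add_i32   lo, al, bl
 *
 * i.e. args[1] and args[2] take the low input halves and the opcode is
 * replaced, which is common for 32-bit guest arithmetic on 64-bit hosts.
 */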
/* Liveness analysis: Convert indirect regs to direct temporaries.  */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global.  */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead.  */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = tcg_call_flags(op);
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_COND_BRANCH) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts && arg_ts->state == TS_DEAD) {
                    TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc);

                    lop->args[0] = temp_arg(dir_ts);
                    lop->args[1] = temp_arg(arg_ts->mem_base);
                    lop->args[2] = arg_ts->mem_offset;

                    /* Loaded, but synced with memory.  */
                    arg_ts->state = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts) {
                    op->args[i] = temp_arg(dir_ts);
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        arg_ts->state = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available.  */
        if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
            arg_ts = arg_temp(op->args[0]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[0] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified.  */
                arg_ts->state = 0;

                if (NEED_SYNC_ARG(0)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc);
                    TCGTemp *out_ts = dir_ts;

                    if (IS_DEAD_ARG(0)) {
                        out_ts = arg_temp(op->args[1]);
                        arg_ts->state = TS_DEAD;
                        tcg_op_remove(s, op);
                    } else {
                        arg_ts->state = TS_MEM;
                    }

                    sop->args[0] = temp_arg(out_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;
                } else {
                    tcg_debug_assert(!IS_DEAD_ARG(0));
                }
            }
        } else {
            for (i = 0; i < nb_oargs; i++) {
                arg_ts = arg_temp(op->args[i]);
                dir_ts = arg_ts->state_ptr;
                if (!dir_ts) {
                    continue;
                }
                op->args[i] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified.  */
                arg_ts->state = 0;

                /* Sync outputs upon their last write.  */
                if (NEED_SYNC_ARG(i)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc);

                    sop->args[0] = temp_arg(dir_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;

                    arg_ts->state = TS_MEM;
                }
                /* Drop outputs that are dead.  */
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }
    }

    return changes;
}
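
/*
 * Illustrative effect of this pass (not taken from the source): for an
 * indirect global G whose canonical slot lives off its memory base,
 * a use of G inside the TB becomes, roughly,
 *
 *     ld_i32  G', base, offset     <- inserted before the first use
 *     ...uses and defs of G'...
 *     st_i32  G', base, offset     <- inserted at the last write
 *
 * where G' is the direct temporary allocated at the top of the pass.
 */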
#ifdef CONFIG_DEBUG_TCG
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for (i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        switch (ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" PRIx64, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}
static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG
            && ts->kind != TEMP_FIXED
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
    intptr_t off, size, align;

    switch (ts->type) {
    case TCG_TYPE_I32:
        size = align = 4;
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        size = align = 8;
        break;
    case TCG_TYPE_V128:
        size = align = 16;
        break;
    case TCG_TYPE_V256:
        /* Note that we do not require aligned storage for V256. */
        size = 32, align = 16;
        break;
    default:
        g_assert_not_reached();
    }

    assert(align <= TCG_TARGET_STACK_ALIGN);
    off = ROUND_UP(s->current_frame_offset, align);

    /* If we've exhausted the stack frame, restart with a smaller TB. */
    if (off + size > s->frame_end) {
        tcg_raise_tb_overflow(s);
    }
    s->current_frame_offset = off + size;

    ts->mem_offset = off;
#if defined(__sparc__)
    ts->mem_offset += TCG_TARGET_STACK_BIAS;
#endif
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
}
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead.  */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    TCGTempVal new_type;

    switch (ts->kind) {
    case TEMP_FIXED:
        return;
    case TEMP_GLOBAL:
    case TEMP_LOCAL:
        new_type = TEMP_VAL_MEM;
        break;
    case TEMP_NORMAL:
        new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
        break;
    case TEMP_CONST:
        new_type = TEMP_VAL_CONST;
        break;
    default:
        g_assert_not_reached();
    }
    if (ts->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ts->reg] = NULL;
    }
    ts->val_type = new_type;
}
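
/*
 * Summary of the mapping above (restating the switch): TEMP_FIXED is
 * never released; TEMP_GLOBAL and TEMP_LOCAL fall back to their
 * canonical memory slot (TEMP_VAL_MEM); TEMP_NORMAL goes to
 * TEMP_VAL_MEM only when freed (free_or_dead < 0) and otherwise to
 * TEMP_VAL_DEAD; TEMP_CONST keeps its value as TEMP_VAL_CONST.
 */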
/* Mark a temporary as dead.  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    if (!temp_readonly(ts) && !ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly.  */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, 0, -1);
    }
}
/**
 * tcg_reg_alloc:
 * @required_regs: Set of registers in which we must allocate.
 * @allocated_regs: Set of registers which must be avoided.
 * @preferred_regs: Set of registers we should prefer.
 * @rev: True if we search the registers in "indirect" order.
 *
 * The allocated register must be in @required_regs & ~@allocated_regs,
 * but if we can put it in @preferred_regs we may save a move later.
 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
                            TCGRegSet allocated_regs,
                            TCGRegSet preferred_regs, bool rev)
{
    int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    reg_ct[1] = required_regs & ~allocated_regs;
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    /* Skip the preferred_regs option if it cannot be satisfied,
       or if the preference made no difference.  */
    f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* Try free registers, preferences first.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            if (s->reg_to_temp[reg] == NULL) {
                return reg;
            }
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (s->reg_to_temp[reg] == NULL &&
                    tcg_regset_test_reg(set, reg)) {
                    return reg;
                }
            }
        }
    }

    /* We must spill something.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (tcg_regset_test_reg(set, reg)) {
                    tcg_reg_free(s, reg, allocated_regs);
                    return reg;
                }
            }
        }
    }

    tcg_abort();
}
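
/*
 * Note on the search order above: the first pass scans only registers
 * that are currently unassigned (reg_to_temp[] == NULL), preferred set
 * first; only when both sets are fully occupied does the second pass
 * spill.  So e.g. a single-register constraint such as a shift-count
 * register on some hosts evicts its current holder at most once per
 * allocation request.
 */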
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        if (ts->type <= TCG_TYPE_I64) {
            tcg_out_movi(s, ts->type, reg, ts->val);
        } else {
            uint64_t val = ts->val;
            MemOp vece = MO_64;

            /*
             * Find the minimal vector element that matches the constant.
             * The targets will, in general, have to do this search anyway;
             * do it generically here.
             */
            if (val == dup_const(MO_8, val)) {
                vece = MO_8;
            } else if (val == dup_const(MO_16, val)) {
                vece = MO_16;
            } else if (val == dup_const(MO_32, val)) {
                vece = MO_32;
            }

            tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
        }
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory. Keep a tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
}
/* save globals to their canonical location and assume they can be
   modified by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}
/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->kind == TEMP_FIXED
                         || ts->mem_coherent);
    }
}
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];

        switch (ts->kind) {
        case TEMP_LOCAL:
            temp_save(s, ts, allocated_regs);
            break;
        case TEMP_NORMAL:
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            /* Similarly, we should have freed any allocated register. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
            break;
        default:
            g_assert_not_reached();
        }
    }

    save_globals(s, allocated_regs);
}
/*
 * At a conditional branch, we assume all temporaries are dead and
 * all globals and local temps are synced to their location.
 */
static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
{
    sync_globals(s, allocated_regs);

    for (int i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        /*
         * The liveness analysis already ensures that temps are dead.
         * Keep tcg_debug_asserts for safety.
         */
        switch (ts->kind) {
        case TEMP_LOCAL:
            tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
            break;
        case TEMP_NORMAL:
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            break;
        default:
            g_assert_not_reached();
        }
    }
}
/*
 * Specialized code generation for INDEX_op_mov_* with a constant.
 */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    /* The movi is not explicitly generated here.  */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
/*
 * Specialized code generation for INDEX_op_mov_*.
 */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs, preferred_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    allocated_regs = s->reserved_regs;
    preferred_regs = op->output_pref[0];
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype],
                  allocated_regs, preferred_regs);
    }

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (IS_DEAD_ARG(0)) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else {
        if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, preferred_regs,
                                         ots->indirect_base);
            }
            if (!tcg_out_mov(s, otype, ots->reg, ts->reg)) {
                /*
                 * Cross register class move not supported.
                 * Store the source register into the destination slot
                 * and leave the destination temp as TEMP_VAL_MEM.
                 */
                assert(!temp_readonly(ots));
                if (!ots->mem_allocated) {
                    temp_allocate_frame(s, ots);
                }
                tcg_out_st(s, ts->type, ts->reg,
                           ots->mem_base->reg, ots->mem_offset);
                ots->mem_coherent = 1;
                temp_free_or_dead(s, ots, -1);
                return;
            }
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0, 0);
        }
    }
}
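
/*
 * Illustrative: when the source dies at the mov and is not TEMP_FIXED,
 * no host instruction is emitted at all -- the destination simply
 * takes over the source's register and reg_to_temp[] is repointed.
 * This is one place where liveness information directly shrinks the
 * generated code.
 */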
/*
 * Specialized code generation for INDEX_op_dup_vec.
 */
static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet dup_out_regs, dup_in_regs;
    TCGTemp *its, *ots;
    TCGType itype, vtype;
    intptr_t endian_fixup;
    unsigned vece;
    bool ok;

    ots = arg_temp(op->args[0]);
    its = arg_temp(op->args[1]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    itype = its->type;
    vece = TCGOP_VECE(op);
    vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    if (its->val_type == TEMP_VAL_CONST) {
        /* Propagate constant via movi -> dupi.  */
        tcg_target_ulong val = its->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, its);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, op->output_pref[0]);
        return;
    }

    dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
    dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;

    /* Allocate the output register now.  */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;

        if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
            /* Make sure to not spill the input register. */
            tcg_regset_set_reg(allocated_regs, its->reg);
        }
        ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                                 op->output_pref[0], ots->indirect_base);
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
    }

    switch (its->val_type) {
    case TEMP_VAL_REG:
        /*
         * The dup constraints must be broad, covering all possible VECE.
         * However, tcg_op_dup_vec() gets to see the VECE and we allow it
         * to fail, indicating that extra moves are required for that case.
         */
        if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
            if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
                goto done;
            }
            /* Try again from memory or a vector input register.  */
        }
        if (!its->mem_coherent) {
            /*
             * The input register is not synced, and so an extra store
             * would be required to use memory.  Attempt an integer-vector
             * register move first.  We do not have a TCGRegSet for this.
             */
            if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
                break;
            }
            /* Sync the temp back to its slot and load from there.  */
            temp_sync(s, its, s->reserved_regs, 0, 0);
        }
        /* fall through */

    case TEMP_VAL_MEM:
#ifdef HOST_WORDS_BIGENDIAN
        endian_fixup = itype == TCG_TYPE_I32 ? 4 : 8;
        endian_fixup -= 1 << vece;
#else
        endian_fixup = 0;
#endif
        if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
                             its->mem_offset + endian_fixup)) {
            goto done;
        }
        tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
        break;

    default:
        g_assert_not_reached();
    }

    /* We now have a vector input register, so dup must succeed. */
    ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
    tcg_debug_assert(ok);

 done:
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, its);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, 0);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, o_preferred_regs;

        i = def->args_ct[nb_oargs + k].sort_index;
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        i_preferred_regs = o_preferred_regs = 0;
        if (arg_ct->ialias) {
            o_preferred_regs = op->output_pref[arg_ct->alias_index];

            /*
             * If the input is readonly, then it cannot also be an
             * output and aliased to itself.  If the input is not
             * dead after the instruction, we must allocate a new
             * register and move it.
             */
            if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
                goto allocate_in_reg;
            }

            /*
             * Check if the current register has already been allocated
             * for another input aliased to an output.
             */
            if (ts->val_type == TEMP_VAL_REG) {
                reg = ts->reg;
                for (int k2 = 0; k2 < k; k2++) {
                    int i2 = def->args_ct[nb_oargs + k2].sort_index;
                    if (def->args_ct[i2].ialias && reg == new_args[i2]) {
                        goto allocate_in_reg;
                    }
                }
            }
            i_preferred_regs = o_preferred_regs;
        }

        temp_load(s, ts, arg_ct->regs, i_allocated_regs, i_preferred_regs);
        reg = ts->reg;

        if (!tcg_regset_test_reg(arg_ct->regs, reg)) {
 allocate_in_reg:
            /*
             * Allocate a new register matching the constraint
             * and move the temporary register into it.
             */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      i_allocated_regs, 0);
            reg = tcg_reg_alloc(s, arg_ct->regs, i_allocated_regs,
                                o_preferred_regs, ts->indirect_base);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_COND_BRANCH) {
        tcg_reg_alloc_cbranch(s, i_allocated_regs);
    } else if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for (k = 0; k < nb_oargs; k++) {
            i = def->args_ct[k].sort_index;
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified.  */
            tcg_debug_assert(!temp_readonly(ts));

            if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
                reg = new_args[arg_ct->alias_index];
            } else if (arg_ct->newreg) {
                reg = tcg_reg_alloc(s, arg_ct->regs,
                                    i_allocated_regs | o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            } else {
                reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            /*
             * Temp value is modified, so the value kept in memory is
             * potentially not the same.
             */
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for (i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified.  */
        tcg_debug_assert(!temp_readonly(ts));

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
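
/*
 * Note: inputs and outputs are visited in sort_index order, which
 * sort_constraints established from most to least constrained, so a
 * tightly constrained operand (e.g. one with a single allowed
 * register) claims its register before looser operands can take it.
 */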
static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGTemp *ots, *itsl, *itsh;
    TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
    tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
    tcg_debug_assert(TCGOP_VECE(op) == MO_64);

    ots = arg_temp(op->args[0]);
    itsl = arg_temp(op->args[1]);
    itsh = arg_temp(op->args[2]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    /* Allocate the output register now.  */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGRegSet dup_out_regs =
            tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;

        /* Make sure to not spill the input registers. */
        if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsl->reg);
        }
        if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsh->reg);
        }

        ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                                 op->output_pref[0], ots->indirect_base);
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
    }

    /* Promote dup2 of immediates to dupi_vec. */
    if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
        uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
        MemOp vece = MO_64;

        if (val == dup_const(MO_8, val)) {
            vece = MO_8;
        } else if (val == dup_const(MO_16, val)) {
            vece = MO_16;
        } else if (val == dup_const(MO_32, val)) {
            vece = MO_32;
        }

        tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
        goto done;
    }

    /* If the two inputs form one 64-bit value, try dupm_vec. */
    if (itsl + 1 == itsh && itsl->base_type == TCG_TYPE_I64) {
        if (!itsl->mem_coherent) {
            temp_sync(s, itsl, s->reserved_regs, 0, 0);
        }
        if (!itsh->mem_coherent) {
            temp_sync(s, itsh, s->reserved_regs, 0, 0);
        }
#ifdef HOST_WORDS_BIGENDIAN
        TCGTemp *its = itsh;
#else
        TCGTemp *its = itsl;
#endif
        if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
                             its->mem_base->reg, its->mem_offset)) {
            goto done;
        }
    }

    /* Fall back to generic expansion. */
    return false;

 done:
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, itsl);
    }
    if (IS_DEAD_ARG(2)) {
        temp_dead(s, itsh);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
    return true;
}
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif
static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    const TCGHelperInfo *info;
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    func_addr = tcg_call_func(op);
    info = tcg_call_info(op);
    flags = info->flags;

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = op->args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs, 0);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    allocated_regs = s->reserved_regs;
    for (i = 0; i < nb_regs; i++) {
        arg = op->args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            reg = tcg_target_call_iarg_regs[i];

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_reg_free(s, reg, allocated_regs);
                    if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                        /*
                         * Cross register class move not supported.  Sync the
                         * temp back to its slot and load from there.
                         */
                        temp_sync(s, ts, allocated_regs, 0, 0);
                        tcg_out_ld(s, ts->type, reg,
                                   ts->mem_base->reg, ts->mem_offset);
                    }
                }
            } else {
                TCGRegSet arg_set = 0;

                tcg_reg_free(s, reg, allocated_regs);
                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs, 0);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

#ifdef CONFIG_TCG_INTERPRETER
    {
        gpointer hash = (gpointer)(uintptr_t)info->typemask;
        ffi_cif *cif = g_hash_table_lookup(ffi_table, hash);
        assert(cif != NULL);
        tcg_out_call(s, func_addr, cif);
    }
#else
    tcg_out_call(s, func_addr);
#endif

    /* assign output registers and emit moves if needed */
    for (i = 0; i < nb_oargs; i++) {
        arg = op->args[i];
        ts = arg_temp(arg);

        /* ENV should not be modified.  */
        tcg_debug_assert(!temp_readonly(ts));

        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);
        if (ts->val_type == TEMP_VAL_REG) {
            s->reg_to_temp[ts->reg] = NULL;
        }
        ts->val_type = TEMP_VAL_REG;
        ts->reg = reg;
        ts->mem_coherent = 0;
        s->reg_to_temp[reg] = ts;
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
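
/*
 * Illustrative flow for a helper taking more integer args than the
 * host has argument registers: the excess args are stored to the
 * static call-args area on the stack first, the leading args are then
 * forced into tcg_target_call_iarg_regs[], every call-clobbered
 * register is freed, globals are saved or synced according to the
 * helper's flags, and finally the outputs are bound to
 * tcg_target_call_oarg_regs[].
 */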
#ifdef CONFIG_PROFILER

/* avoid copy/paste errors */
#define PROF_ADD(to, from, field)                               \
    do {                                                        \
        (to)->field += qatomic_read(&((from)->field));          \
    } while (0)

#define PROF_MAX(to, from, field)                                       \
    do {                                                                \
        typeof((from)->field) val__ = qatomic_read(&((from)->field));   \
        if (val__ > (to)->field) {                                      \
            (to)->field = val__;                                        \
        }                                                               \
    } while (0)

/* Pass in a zero'ed @prof */
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        const TCGProfile *orig = &s->prof;

        if (counters) {
            PROF_ADD(prof, orig, cpu_exec_time);
            PROF_ADD(prof, orig, tb_count1);
            PROF_ADD(prof, orig, tb_count);
            PROF_ADD(prof, orig, op_count);
            PROF_MAX(prof, orig, op_count_max);
            PROF_ADD(prof, orig, temp_count);
            PROF_MAX(prof, orig, temp_count_max);
            PROF_ADD(prof, orig, del_op_count);
            PROF_ADD(prof, orig, code_in_len);
            PROF_ADD(prof, orig, code_out_len);
            PROF_ADD(prof, orig, search_out_len);
            PROF_ADD(prof, orig, interm_time);
            PROF_ADD(prof, orig, code_time);
            PROF_ADD(prof, orig, la_time);
            PROF_ADD(prof, orig, opt_time);
            PROF_ADD(prof, orig, restore_count);
            PROF_ADD(prof, orig, restore_time);
        }
        if (table) {
            int i;

            for (i = 0; i < NB_OPS; i++) {
                PROF_ADD(prof, orig, table_op_count[i]);
            }
        }
    }
}

#undef PROF_ADD
#undef PROF_MAX

static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}

static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}

void tcg_dump_op_count(void)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name,
                    prof.table_op_count[i]);
    }
}

int64_t tcg_cpu_exec_time(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    int64_t ret = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        const TCGProfile *prof = &s->prof;

        ret += qatomic_read(&prof->cpu_exec_time);
    }
    return ret;
}
#else
void tcg_dump_op_count(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}

int64_t tcg_cpu_exec_time(void)
{
    error_report("%s: TCG profiler not compiled", __func__);
    exit(EXIT_FAILURE);
}
#endif
int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &s->prof;
#endif
    int i, num_insns;
    TCGOp *op;

#ifdef CONFIG_PROFILER
    {
        int n = 0;

        QTAILQ_FOREACH(op, &s->ops, link) {
            n++;
        }
        qatomic_set(&prof->op_count, prof->op_count + n);
        if (n > prof->op_count_max) {
            qatomic_set(&prof->op_count_max, n);
        }

        n = s->nb_temps;
        qatomic_set(&prof->temp_count, prof->temp_count + n);
        if (n > prof->temp_count_max) {
            qatomic_set(&prof->temp_count_max, n);
        }
    }
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        FILE *logfile = qemu_log_lock();
        qemu_log("OP:\n");
        tcg_dump_ops(s, false);
        qemu_log("\n");
        qemu_log_unlock(logfile);
    }
#endif

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted.  */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && l->refs) {
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
                error = true;
            }
        }
        assert(!error);
    }
#endif

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    reachable_code_pass(s);
    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(tb->pc))) {
            FILE *logfile = qemu_log_lock();
            qemu_log("OP before indirect lowering:\n");
            tcg_dump_ops(s, false);
            qemu_log("\n");
            qemu_log_unlock(logfile);
        }
#endif
        /* Replace indirect temps with direct temps.  */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness.  */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        FILE *logfile = qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s, true);
        qemu_log("\n");
        qemu_log_unlock(logfile);
    }
#endif

    tcg_reg_alloc_start(s);

    /*
     * Reset the buffer pointers when restarting after overflow.
     * TODO: Move this into translate-all.c with the rest of the
     * buffer management.  Having only this done here is confusing.
     */
    s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
    s->code_ptr = s->code_buf;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_dup_vec:
            tcg_reg_alloc_dup(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset.  */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]));
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        case INDEX_op_dup2_vec:
            if (tcg_reg_alloc_dup2(s, op)) {
                break;
            }
            /* fall through */
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off.  */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

#ifndef CONFIG_TCG_INTERPRETER
    /* flush instruction cache */
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf,
                        tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
#endif

    return tcg_current_code_size(s);
}
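
/*
 * Note on the failure paths above: a negative return from
 * tcg_gen_code() (code buffer high water exceeded, gen_insn_end_off
 * overflow, or ldst/pool/relocation finalization failure) tells the
 * caller to restart translation of this TB, typically as a smaller one.
 */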
#ifdef CONFIG_PROFILER
void tcg_dump_info(void)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    qemu_printf("JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    qemu_printf("translated TBs      %" PRId64 " (aborted=%" PRId64
                " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    qemu_printf("avg ops/TB          %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    qemu_printf("deleted ops/TB      %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    qemu_printf("avg temps/TB        %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    qemu_printf("avg host code/TB    %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    qemu_printf("avg search data/TB  %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    qemu_printf("cycles/op           %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    qemu_printf("cycles/in byte      %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    qemu_printf("cycles/out byte     %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    qemu_printf("cycles/search byte  %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    qemu_printf("  gen_interm time   %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    qemu_printf("  gen_code time     %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    qemu_printf("optim./code time    %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    qemu_printf("liveness/code time  %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    qemu_printf("cpu_restore count   %" PRId64 "\n",
                s->restore_count);
    qemu_printf("  avg cycles        %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}
#endif
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface.  */
static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
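
/*
 * Example with the .str table built below: the leading NUL occupies
 * offset 0, so find_string(img->str, ".text") returns 1, and
 * find_string(img->str, "code_gen_buffer") returns the offset just
 * past the seven NUL-terminated section names -- exactly the
 * sh_name/st_name values that ELF readers resolve through .strtab.
 */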
static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities.  */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite.  */
            }
            fclose(f);
        }
    }
#endif

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier.  */

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(const void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */
#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif