/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"
/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
/* Note: the long-term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions. */
#define NO_CPU_IO_DEFS

#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif
#include "tcg/tcg-ldst.h"
#include "tcg-internal.h"
/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif /* TCG_TARGET_MAYBE_vec */
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env cpu_env = 0;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;
#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif
#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
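/*
 * A concrete reading of the unit arithmetic above: on a host with
 * TCG_TARGET_INSN_UNIT_SIZE == 4, tcg_out64() takes the memcpy path and
 * advances code_ptr by 8 / 4 == 2 insn units; only when the unit size
 * matches the value size exactly does the write collapse to a single
 * "*s->code_ptr++ = v".
 */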
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}
static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}
TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}
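/*
 * Typical translator usage (illustrative sketch):
 *
 *     TCGLabel *over = gen_new_label();
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, val, 0, over);
 *     ...
 *     gen_set_label(over);
 *
 * gen_set_label() (see tcg-op.h) emits INDEX_op_set_label, which code
 * generation turns into a tcg_out_label() call, fixing the address that
 * the earlier branch is relocated against.
 */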
static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
}
/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}
#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_N1_I2
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
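/*
 * This is the classic X-macro pattern: tcg-target-con-set.h is included
 * several times with different expansions of the C_O*_I* macros.  For
 * example, a target line "C_O1_I2(r, r, ri)" expands above, via C_PFX3,
 * to the enumerator c_o1_i2_r_r_ri, and below to the initializer
 * { .args_ct_str = { "r", "r", "ri" } } at the matching array index.
 */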
/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_N1_I2
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"
static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called
 * tcg_context_init and registered the target's TCG globals) must register
 * with this function before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
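/*
 * Usage sketch: callers normally go through the inline tcg_malloc()
 * wrapper (see tcg.h), which bumps s->pool_cur and only drops into
 * tcg_malloc_internal() when the current chunk is exhausted.  All pool
 * memory lives until the next tcg_pool_reset(), i.e. for the duration
 * of a single translation.
 */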
#include "exec/helper-proto.h"

static TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;
#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    }
    g_assert_not_reached();
}
static void init_ffi_layouts(void)
{
    /* g_direct_hash/equal for direct comparisons on uint32_t.  */
    GHashTable *ffi_table = g_hash_table_new(NULL, NULL);

    for (int i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        TCGHelperInfo *info = &all_helpers[i];
        unsigned typemask = info->typemask;
        gpointer hash = (gpointer)(uintptr_t)typemask;
        struct {
            ffi_cif cif;
            ffi_type *args[];
        } *ca;
        ffi_status status;
        int nargs;
        ffi_cif *cif;

        cif = g_hash_table_lookup(ffi_table, hash);
        if (cif) {
            info->cif = cif;
            continue;
        }

        /* Ignoring the return type, find the last non-zero field. */
        nargs = 32 - clz32(typemask >> 3);
        nargs = DIV_ROUND_UP(nargs, 3);

        ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
        ca->cif.rtype = typecode_to_ffi(typemask & 7);
        ca->cif.nargs = nargs;

        if (nargs != 0) {
            ca->cif.arg_types = ca->args;
            for (int j = 0; j < nargs; ++j) {
                int typecode = extract32(typemask, (j + 1) * 3, 3);
                ca->args[j] = typecode_to_ffi(typecode);
            }
        }

        status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                              ca->cif.rtype, ca->cif.arg_types);
        assert(status == FFI_OK);

        cif = &ca->cif;
        info->cif = cif;
        g_hash_table_insert(ffi_table, hash, (gpointer)cif);
    }

    g_hash_table_destroy(ffi_table);
}
#endif /* CONFIG_TCG_INTERPRETER */
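/*
 * A worked example of the typemask encoding used above: each slot is a
 * 3-bit dh_typecode field, with the return type in bits 0..2 and
 * argument N in bits (N+1)*3 .. (N+1)*3+2.  Thus
 * "nargs = 32 - clz32(typemask >> 3)" finds the highest non-void
 * argument field, and extract32(typemask, (j + 1) * 3, 3) recovers the
 * typecode for argument j.
 */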
typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;
static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}
static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}
static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}
static void init_call_layout(TCGHelperInfo *info)
{
    int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
    int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned typemask = info->typemask;
    unsigned typecode;
    TCGCumulativeArgs cum = { };

    /*
     * Parse and place any function return value.
     */
    typecode = typemask & 7;
    switch (typecode) {
    case dh_typecode_void:
        info->nr_out = 0;
        break;
    case dh_typecode_i32:
    case dh_typecode_s32:
    case dh_typecode_ptr:
        info->nr_out = 1;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    case dh_typecode_i64:
    case dh_typecode_s64:
        info->nr_out = 64 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    default:
        g_assert_not_reached();
    }
    assert(info->nr_out <= ARRAY_SIZE(tcg_target_call_oarg_regs));

    /*
     * Parse and place function arguments.
     */
    for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
        TCGCallArgumentKind kind;
        TCGType type;

        typecode = typemask & 7;
        switch (typecode) {
        case dh_typecode_i32:
        case dh_typecode_s32:
            type = TCG_TYPE_I32;
            break;
        case dh_typecode_i64:
        case dh_typecode_s64:
            type = TCG_TYPE_I64;
            break;
        case dh_typecode_ptr:
            type = TCG_TYPE_PTR;
            break;
        default:
            g_assert_not_reached();
        }

        switch (type) {
        case TCG_TYPE_I32:
            switch (TCG_TARGET_CALL_ARG_I32) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                break;
            case TCG_CALL_ARG_EXTEND:
                kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
                layout_arg_1(&cum, info, kind);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I64:
            switch (TCG_TARGET_CALL_ARG_I64) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                if (TCG_TARGET_REG_BITS == 32) {
                    layout_arg_normal_n(&cum, info, 2);
                } else {
                    layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                }
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    info->nr_in = cum.info_in_idx;

    /* Validate that we didn't overrun the input array. */
    assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
    /* Validate the backend has enough argument space. */
    assert(cum.arg_slot <= max_reg_slots + max_stk_slots);
    assert(cum.ref_slot <= max_stk_slots);
}
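/*
 * Example of the layout rules above (illustrative): on a 32-bit host
 * whose ABI sets TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN, an i64
 * helper argument is first aligned to an even slot by layout_arg_even()
 * and then placed by layout_arg_normal_n(&cum, info, 2), occupying two
 * consecutive argument slots for the low and high halves of the value.
 */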
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        init_call_layout(&all_helpers[i]);
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

#ifdef CONFIG_TCG_INTERPRETER
    init_ffi_layouts();
#endif

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
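/*
 * At this point "env" (TCG_AREG0) has been registered as the first TCG
 * global, and cpu_env is the handle through which front ends address
 * CPUState fields, e.g. tcg_global_mem_new_i32(cpu_env, offset, name).
 */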
void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
            if (s->data_gen_ptr) {
                size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
                size_t data_size = prologue_size - code_size;
                size_t i;

                disas(logfile, s->code_gen_ptr, code_size);

                for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint64_t *)(s->data_gen_ptr + i));
                    } else {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08x\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint32_t *)(s->data_gen_ptr + i));
                    }
                }
            } else {
                disas(logfile, s->code_gen_ptr, prologue_size);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
#endif

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif

    tcg_region_prologue_set(s);
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}
static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}
static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0;

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + 4;
        ts2->temp_subindex = 1;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type. */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->kind == kind);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->kind = kind;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;
            ts2->kind = kind;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->kind = kind;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif

    return ts;
}
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}
/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

    switch (ts->kind) {
    case TEMP_CONST:
        /*
         * In order to simplify users of tcg_constant_*,
         * silently ignore free.
         */
        return;
    case TEMP_NORMAL:
    case TEMP_LOCAL:
        break;
    default:
        g_assert_not_reached();
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
    set_bit(idx, s->free_temps[k].l);
}
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        int64_t *val_ptr;

        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + 1);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;

            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;

            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works.  Actual uses will
             * truncate the value to the low part.
             */
            ts[HOST_BIG_ENDIAN].val = val;
            ts[!HOST_BIG_ENDIAN].val = val >> 32;
            val_ptr = &ts[HOST_BIG_ENDIAN].val;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
            val_ptr = &ts->val;
        }
        g_hash_table_insert(h, val_ptr, ts);
    }
    return ts;
}
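/*
 * Usage note: tcg_constant_internal() interns one TEMP_CONST temp per
 * (type, value) pair, so e.g. tcg_constant_i32(0) returns the same temp
 * every time within a single translation (the const tables are cleared
 * by tcg_func_start); tcg_temp_free_internal() deliberately ignores
 * frees of such temps, as noted above.
 */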
TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}
TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}
TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}
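/*
 * Unlike tcg_constant_*(), the tcg_const_*() helpers above allocate a
 * fresh writable temp and emit an explicit movi into it, so the result
 * may be modified afterwards and must eventually be freed by the caller.
 */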
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_goto_ptr:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_qemu_st8_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_nand_vec:
        return have_vec && TCG_TARGET_HAS_nand_vec;
    case INDEX_op_nor_vec:
        return have_vec && TCG_TARGET_HAS_nor_vec;
    case INDEX_op_eqv_vec:
        return have_vec && TCG_TARGET_HAS_eqv_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    const TCGHelperInfo *info;
    TCGv_i64 extend_free[MAX_CALL_IARGS];
    int n_extend = 0;
    TCGOp *op;
    int i, n, pi = 0, total_args;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    total_args = info->nr_out + info->nr_in + 2;
    op = tcg_op_alloc(INDEX_op_call, total_args);

#ifdef CONFIG_PLUGIN
    /* detect non-plugin helpers */
    if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) {
        tcg_ctx->plugin_insn->calls_helpers = true;
    }
#endif

    TCGOP_CALLO(op) = n = info->nr_out;
    switch (n) {
    case 0:
        tcg_debug_assert(ret == NULL);
        break;
    case 1:
        tcg_debug_assert(ret != NULL);
        op->args[pi++] = temp_arg(ret);
        break;
    case 2:
        tcg_debug_assert(ret != NULL);
        tcg_debug_assert(ret->base_type == ret->type + 1);
        tcg_debug_assert(ret->temp_subindex == 0);
        op->args[pi++] = temp_arg(ret);
        op->args[pi++] = temp_arg(ret + 1);
        break;
    default:
        g_assert_not_reached();
    }

    TCGOP_CALLI(op) = n = info->nr_in;
    for (i = 0; i < n; i++) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
            op->args[pi++] = temp_arg(ts);
            break;

        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            {
                TCGv_i64 temp = tcg_temp_new_i64();
                TCGv_i32 orig = temp_tcgv_i32(ts);

                if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
                    tcg_gen_ext_i32_i64(temp, orig);
                } else {
                    tcg_gen_extu_i32_i64(temp, orig);
                }
                op->args[pi++] = tcgv_i64_arg(temp);
                extend_free[n_extend++] = temp;
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = (uintptr_t)info;
    tcg_debug_assert(pi == total_args);

    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);

    tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
    for (i = 0; i < n_extend; ++i) {
        tcg_temp_free_i64(extend_free[i]);
    }
}
*s
)
1727 for (i
= 0, n
= s
->nb_temps
; i
< n
; i
++) {
1728 TCGTemp
*ts
= &s
->temps
[i
];
1729 TCGTempVal val
= TEMP_VAL_MEM
;
1733 val
= TEMP_VAL_CONST
;
1742 val
= TEMP_VAL_DEAD
;
1745 ts
->mem_allocated
= 0;
1748 g_assert_not_reached();
1753 memset(s
->reg_to_temp
, 0, sizeof(s
->reg_to_temp
));
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    switch (ts->kind) {
    case TEMP_FIXED:
    case TEMP_GLOBAL:
        pstrcpy(buf, buf_size, ts->name);
        break;
    case TEMP_LOCAL:
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
        break;
    case TEMP_EBB:
        snprintf(buf, buf_size, "ebb%d", idx - s->nb_globals);
        break;
    case TEMP_NORMAL:
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
        break;
    case TEMP_CONST:
        switch (ts->type) {
        case TCG_TYPE_I32:
            snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
            break;
#if TCG_TARGET_REG_BITS > 32
        case TCG_TYPE_I64:
            snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
            break;
#endif
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            snprintf(buf, buf_size, "v%d$0x%" PRIx64,
                     64 << (ts->type - TCG_TYPE_V64), ts->val);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};
static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEUQ] = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEUQ] = "beq",
};
static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef TARGET_ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
static const char bswap_flag_name[][6] = {
    [TCG_BSWAP_IZ] = "iz",
    [TCG_BSWAP_OZ] = "oz",
    [TCG_BSWAP_OS] = "os",
    [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
    [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
};
static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}
/* Return only the number of characters output -- no error return. */
#define ne_fprintf(...) \
    ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
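/*
 * Rationale: fprintf() returns a negative value on error; clamping to
 * zero lets the column-tracking arithmetic in tcg_dump_ops() below
 * ("col += ne_fprintf(...)") stay monotonic without per-call error
 * checks.
 */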
static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += ne_fprintf(f, "\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += ne_fprintf(f, " " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            const TCGHelperInfo *info = tcg_call_info(op);
            void *func = tcg_call_func(op);

            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            col += ne_fprintf(f, " %s ", def->name);

            /*
             * Print the function name from TCGHelperInfo, if available.
             * Note that plugins have a template function for the info,
             * but the actual function pointer comes from the plugin.
             */
            if (func == info->func) {
                col += ne_fprintf(f, "%s", info->name);
            } else {
                col += ne_fprintf(f, "plugin(%p)", func);
            }

            col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                            op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                col += ne_fprintf(f, ",%s", t);
            }
        } else {
            col += ne_fprintf(f, " %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
                                  8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                const char *sep = k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                const char *sep = k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
            case INDEX_op_cmpsel_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
                } else {
                    col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st8_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    MemOpIdx oi = op->args[k++];
                    MemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += ne_fprintf(f, ",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += ne_fprintf(f, ",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            case INDEX_op_bswap16_i32:
            case INDEX_op_bswap16_i64:
            case INDEX_op_bswap32_i32:
            case INDEX_op_bswap32_i64:
            case INDEX_op_bswap64_i64:
                {
                    TCGArg flags = op->args[k];
                    const char *name = NULL;

                    if (flags < ARRAY_SIZE(bswap_flag_name)) {
                        name = bswap_flag_name[flags];
                    }
                    if (name) {
                        col += ne_fprintf(f, ",%s", name);
                    } else {
                        col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
                    }
                    i = k = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += ne_fprintf(f, "%s$L%d", k ? "," : "",
                                  arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
                                  op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            for (; col < 40; ++col) {
                putc(' ', f);
            }
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                ne_fprintf(f, "  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                ne_fprintf(f, "  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = output_pref(op, i);

                if (i == 0) {
                    ne_fprintf(f, "  pref=");
                } else {
                    ne_fprintf(f, ",");
                }
                if (set == 0) {
                    ne_fprintf(f, "none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    ne_fprintf(f, "all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    ne_fprintf(f, "0x%x", (uint32_t)set);
                } else {
                    ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
                }
            }
        }

        putc('\n', f);
    }
}
/* we give more priority to constraints with less registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct = &def->args_ct[k];
    int n = ctpop64(arg_ct->regs);

    /*
     * Sort constraints of a single register first, which includes output
     * aliases (which must exactly match the input already allocated).
     */
    if (n == 1 || arg_ct->oalias) {
        return INT_MAX;
    }

    /*
     * Sort register pairs next, first then second immediately after.
     * Arbitrarily sort multiple pairs by the index of the first reg;
     * there shouldn't be many pairs.
     */
    switch (arg_ct->pair) {
    case 1:
    case 3:
        return (k + 1) * 2;
    case 2:
        return (arg_ct->pair_index + 1) * 2 - 1;
    }

    /* Finally, sort by decreasing register count. */
    assert(n > 1);
    return -n;
}

/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j;
    TCGArgConstraint *a = def->args_ct;

    for (i = 0; i < n; i++) {
        a[start + i].sort_index = start + i;
    }
    if (n <= 1) {
        return;
    }
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            int p1 = get_constraint_priority(def, a[start + i].sort_index);
            int p2 = get_constraint_priority(def, a[start + j].sort_index);
            if (p1 < p2) {
                int tmp = a[start + i].sort_index;
                a[start + i].sort_index = a[start + j].sort_index;
                a[start + j].sort_index = tmp;
            }
        }
    }
}
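/*
 * A note on the O(n^2) exchange sort above: n is bounded by the opcode's
 * operand count (at most TCG_MAX_OP_ARGS), so a handful of comparisons
 * per opcode at startup is cheaper than anything more elaborate.
 */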
2161 static void process_op_defs(TCGContext
*s
)
2165 for (op
= 0; op
< NB_OPS
; op
++) {
2166 TCGOpDef
*def
= &tcg_op_defs
[op
];
2167 const TCGTargetOpDef
*tdefs
;
2168 bool saw_alias_pair
= false;
2169 int i
, o
, i2
, o2
, nb_args
;
2171 if (def
->flags
& TCG_OPF_NOT_PRESENT
) {
2175 nb_args
= def
->nb_iargs
+ def
->nb_oargs
;
2181 * Macro magic should make it impossible, but double-check that
2182 * the array index is in range. Since the signness of an enum
2183 * is implementation defined, force the result to unsigned.
2185 unsigned con_set
= tcg_target_op_def(op
);
2186 tcg_debug_assert(con_set
< ARRAY_SIZE(constraint_sets
));
2187 tdefs
= &constraint_sets
[con_set
];
2189 for (i
= 0; i
< nb_args
; i
++) {
2190 const char *ct_str
= tdefs
->args_ct_str
[i
];
2191 bool input_p
= i
>= def
->nb_oargs
;
2193 /* Incomplete TCGTargetOpDef entry. */
2194 tcg_debug_assert(ct_str
!= NULL
);
2199 tcg_debug_assert(input_p
);
2200 tcg_debug_assert(o
< def
->nb_oargs
);
2201 tcg_debug_assert(def
->args_ct
[o
].regs
!= 0);
2202 tcg_debug_assert(!def
->args_ct
[o
].oalias
);
2203 def
->args_ct
[i
] = def
->args_ct
[o
];
2204 /* The output sets oalias. */
2205 def
->args_ct
[o
].oalias
= 1;
2206 def
->args_ct
[o
].alias_index
= i
;
2207 /* The input sets ialias. */
2208 def
->args_ct
[i
].ialias
= 1;
2209 def
->args_ct
[i
].alias_index
= o
;
2210 if (def
->args_ct
[i
].pair
) {
2211 saw_alias_pair
= true;
2213 tcg_debug_assert(ct_str
[1] == '\0');
2217 tcg_debug_assert(!input_p
);
2218 def
->args_ct
[i
].newreg
= true;
2222 case 'p': /* plus */
2223 /* Allocate to the register after the previous. */
2224 tcg_debug_assert(i
> (input_p
? def
->nb_oargs
: 0));
2226 tcg_debug_assert(!def
->args_ct
[o
].pair
);
2227 tcg_debug_assert(!def
->args_ct
[o
].ct
);
2228 def
->args_ct
[i
] = (TCGArgConstraint
){
2231 .regs
= def
->args_ct
[o
].regs
<< 1,
2233 def
->args_ct
[o
].pair
= 1;
2234 def
->args_ct
[o
].pair_index
= i
;
2235 tcg_debug_assert(ct_str
[1] == '\0');
2238 case 'm': /* minus */
2239 /* Allocate to the register before the previous. */
2240 tcg_debug_assert(i
> (input_p
? def
->nb_oargs
: 0));
2242 tcg_debug_assert(!def
->args_ct
[o
].pair
);
2243 tcg_debug_assert(!def
->args_ct
[o
].ct
);
2244 def
->args_ct
[i
] = (TCGArgConstraint
){
2247 .regs
= def
->args_ct
[o
].regs
>> 1,
2249 def
->args_ct
[o
].pair
= 2;
2250 def
->args_ct
[o
].pair_index
= i
;
2251 tcg_debug_assert(ct_str
[1] == '\0');
2258 def
->args_ct
[i
].ct
|= TCG_CT_CONST
;
2261 /* Include all of the target-specific constraints. */
2264 #define CONST(CASE, MASK) \
2265 case CASE: def->args_ct[i].ct |= MASK; break;
2266 #define REGS(CASE, MASK) \
2267 case CASE: def->args_ct[i].regs |= MASK; break;
2269 #include "tcg-target-con-str.h"
2278 /* Typo in TCGTargetOpDef constraint. */
2279 g_assert_not_reached();
2281 } while (*++ct_str
!= '\0');
2284 /* TCGTargetOpDef entry with too much information? */
2285 tcg_debug_assert(i
== TCG_MAX_OP_ARGS
|| tdefs
->args_ct_str
[i
] == NULL
);
        /*
         * Fix up output pairs that are aliased with inputs.
         * When we created the alias, we copied pair from the output.
         * There are three cases:
         *    (1a) Pairs of inputs alias pairs of outputs.
         *    (1b) One input aliases the first of a pair of outputs.
         *    (2)  One input aliases the second of a pair of outputs.
         *
         * Case 1a is handled by making sure that the pair_index'es are
         * properly updated so that they appear the same as a pair of inputs.
         *
         * Case 1b is handled by setting the pair_index of the input to
         * itself, simply so it doesn't point to an unrelated argument.
         * Since we don't encounter the "second" during the input allocation
         * phase, nothing happens with the second half of the input pair.
         *
         * Case 2 is handled by setting the second input to pair=3, the
         * first output to pair=3, and the pair_index'es to match.
         */
        if (saw_alias_pair) {
            for (i = def->nb_oargs; i < nb_args; i++) {
                /*
                 * Since [0-9pm] must be alone in the constraint string,
                 * the only way they can both be set is if the pair comes
                 * from the output alias.
                 */
                if (!def->args_ct[i].ialias) {
                    continue;
                }
                switch (def->args_ct[i].pair) {
                case 0:
                    break;

                case 1:
                    o = def->args_ct[i].alias_index;
                    o2 = def->args_ct[o].pair_index;
                    tcg_debug_assert(def->args_ct[o].pair == 1);
                    tcg_debug_assert(def->args_ct[o2].pair == 2);
                    if (def->args_ct[o2].oalias) {
                        /* Case 1a */
                        i2 = def->args_ct[o2].alias_index;
                        tcg_debug_assert(def->args_ct[i2].pair == 2);
                        def->args_ct[i2].pair_index = i;
                        def->args_ct[i].pair_index = i2;
                    } else {
                        /* Case 1b */
                        def->args_ct[i].pair_index = i;
                    }
                    break;

                case 2:
                    o = def->args_ct[i].alias_index;
                    o2 = def->args_ct[o].pair_index;
                    tcg_debug_assert(def->args_ct[o].pair == 2);
                    tcg_debug_assert(def->args_ct[o2].pair == 1);
                    if (def->args_ct[o2].oalias) {
                        /* Case 1a */
                        i2 = def->args_ct[o2].alias_index;
                        tcg_debug_assert(def->args_ct[i2].pair == 1);
                        def->args_ct[i2].pair_index = i;
                        def->args_ct[i].pair_index = i2;
                    } else {
                        /* Case 2 */
                        def->args_ct[i].pair = 3;
                        def->args_ct[o2].pair = 3;
                        def->args_ct[i].pair_index = o2;
                        def->args_ct[o2].pair_index = i;
                    }
                    break;

                default:
                    g_assert_not_reached();
                }
            }
        }
        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
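
/*
 * By way of example (a hypothetical constraint set, not from any
 * particular target): an op whose two outputs are described by "r","p"
 * forms an output pair (pair == 1 on the first, pair == 2 on the
 * second); a single input "2" aliasing the second output is rewritten
 * by the fixup above to pair == 3 on both the input and the first
 * output, with pair_index linking the two -- exactly the shape that
 * the pair-aware cases in tcg_reg_alloc_op() expect.
 */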
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    TCGLabel *label;

    switch (op->opc) {
    case INDEX_op_br:
        label = arg_label(op->args[0]);
        label->refs--;
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        label = arg_label(op->args[3]);
        label->refs--;
        break;
    case INDEX_op_brcond2_i32:
        label = arg_label(op->args[5]);
        label->refs--;
        break;
    default:
        break;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;

#ifdef CONFIG_PROFILER
    qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}

void tcg_remove_ops_after(TCGOp *op)
{
    TCGContext *s = tcg_ctx;

    while (true) {
        TCGOp *last = tcg_last_op();
        if (last == op) {
            return;
        }
        tcg_op_remove(s, last);
    }
}
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op = NULL;

    if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
        QTAILQ_FOREACH(op, &s->free_ops, link) {
            if (nargs <= op->nargs) {
                QTAILQ_REMOVE(&s->free_ops, op, link);
                nargs = op->nargs;
                goto found;
            }
        }
    }

    /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
    nargs = MAX(4, nargs);
    op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);

 found:
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    op->nargs = nargs;

    /* Check for bitfield overflow. */
    tcg_debug_assert(op->nargs == nargs);

    s->nb_ops++;
    return op;
}
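
/*
 * Note on the recycling above: because nargs is rounded up to at least
 * 4 when an op is first allocated, and a recycled op keeps its original
 * op->nargs, the free_ops search accepts any op whose argument array is
 * at least as large as the current request.
 */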
TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
{
    TCGOp *op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, unsigned nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, unsigned nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
/* Reachable analysis : remove unreachable code.  */
static void reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);
            if (label->refs == 0) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that will be, and there is
                 * little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again.  */
                dead = false;
                remove = false;

                /*
                 * Optimization can fold conditional branches to unconditional.
                 * If we find a label with one reference which is preceded by
                 * an unconditional branch to it, remove both.  This needed to
                 * wait until the dead code in between them was removed.
                 */
                if (label->refs == 1) {
                    TCGOp *op_prev = QTAILQ_PREV(op, link);
                    if (op_prev->opc == INDEX_op_br &&
                        label == arg_label(op_prev->args[0])) {
                        tcg_op_remove(s, op_prev);
                        remove = true;
                    }
                }
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead.  */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions.  */
            if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind.  */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}
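
/*
 * Worked example for the label handling above: if earlier optimization
 * folded "brcond ..., $L1" into "br $L1", everything between the br and
 * "set_label $L1" is removed as dead; once that dead code is gone, $L1
 * has a single reference from the immediately preceding br, so both the
 * br and the set_label are deleted as well.
 */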
#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
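
/*
 * Informal reading of the two macros above: within an op's TCGLifeData,
 * DEAD_ARG << n is set when argument n dies at this op, and
 * SYNC_ARG << n when argument n must be synced back to its canonical
 * memory slot at this op.
 */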
/* For liveness_pass_1, the register preferences for a given temp.  */
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
{
    return ts->state_ptr;
}

/* For liveness_pass_1, reset the preferences for a given temp to the
 * maximal regset for its type.
 */
static inline void la_reset_pref(TCGTemp *ts)
{
    *la_temp_pref(ts)
        = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
}

/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void la_func_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void la_bb_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_FIXED:
        case TEMP_GLOBAL:
        case TEMP_TB:
            state = TS_DEAD | TS_MEM;
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            state = TS_DEAD;
            break;
        default:
            g_assert_not_reached();
        }
        ts->state = state;
        la_reset_pref(ts);
    }
}

/* liveness analysis: sync globals back to memory.  */
static void la_global_sync(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; ++i) {
        int state = s->temps[i].state;
        s->temps[i].state = state | TS_MEM;
        if (state == TS_DEAD) {
            /* If the global was previously dead, reset prefs.  */
            la_reset_pref(&s->temps[i]);
        }
    }
}

/*
 * liveness analysis: conditional branch: all temps are dead unless
 * explicitly live-across-conditional-branch, globals and local temps
 * should be synced.
 */
static void la_bb_sync(TCGContext *s, int ng, int nt)
{
    la_global_sync(s, ng);

    for (int i = ng; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_TB:
            state = ts->state;
            ts->state = state | TS_MEM;
            if (state != TS_DEAD) {
                continue;
            }
            break;
        case TEMP_EBB:
            s->temps[i].state = TS_DEAD;
            break;
        case TEMP_CONST:
            continue;
        default:
            g_assert_not_reached();
        }
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: sync globals back to memory and kill.  */
static void la_global_kill(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; i++) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: note live globals crossing calls.  */
static void la_cross_call(TCGContext *s, int nt)
{
    TCGRegSet mask = ~tcg_target_call_clobber_regs;
    int i;

    for (i = 0; i < nt; i++) {
        TCGTemp *ts = &s->temps[i];
        if (!(ts->state & TS_DEAD)) {
            TCGRegSet *pset = la_temp_pref(ts);
            TCGRegSet set = *pset;

            set &= mask;
            /* If the combination is not possible, restart.  */
            if (set == 0) {
                set = tcg_target_available_regs[ts->type] & mask;
            }
            *pset = set;
        }
    }
}
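
/*
 * Example for la_cross_call(): a live temp whose preference set
 * contains only call-clobbered registers cannot keep any of them
 * across the call, so its preference is reset above to the call-saved
 * subset of all registers available for its type.
 */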
/* Liveness analysis : update the opc_arg_life array to tell if a
   given input argument is dead. Instructions updating dead
   temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB.  */
    la_func_end(s, nb_globals, nb_temps);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                const TCGHelperInfo *info = tcg_call_info(op);
                int call_flags = tcg_call_flags(op);

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead.  */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);
                }

                /* Not used -- it will be tcg_target_call_oarg_reg().  */
                memset(op->output_pref, 0, sizeof(op->output_pref));

                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper.  */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs.  */
                la_cross_call(s, nb_temps);

                /*
                 * Input arguments are live for preceding opcodes.
                 *
                 * For those arguments that die, and will be allocated in
                 * registers, clear the register set for that arg, to be
                 * filled in below.  For args that will be on the stack,
                 * reset to any available reg.  Process arguments in reverse
                 * order so that if a temp is used more than once, the stack
                 * reset to max happens before the register reset to 0.
                 */
                for (i = nb_iargs - 1; i >= 0; i--) {
                    const TCGCallArgumentLoc *loc = &info->in[i];
                    ts = arg_temp(op->args[nb_oargs + i]);

                    if (ts->state & TS_DEAD) {
                        switch (loc->kind) {
                        case TCG_CALL_ARG_NORMAL:
                        case TCG_CALL_ARG_EXTEND_U:
                        case TCG_CALL_ARG_EXTEND_S:
                            if (loc->arg_slot
                                < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
                                *la_temp_pref(ts) = 0;
                                break;
                            }
                            /* fall through */
                        default:
                            *la_temp_pref(ts) =
                                tcg_target_available_regs[ts->type];
                            break;
                        }
                        ts->state &= ~TS_DEAD;
                    }
                }

                /*
                 * For each input argument, add its input register to prefs.
                 * If a temp is used once, this produces a single set bit;
                 * if a temp is used multiple times, this produces a set.
                 */
                for (i = 0; i < nb_iargs; i++) {
                    const TCGCallArgumentLoc *loc = &info->in[i];
                    ts = arg_temp(op->args[nb_oargs + i]);

                    switch (loc->kind) {
                    case TCG_CALL_ARG_NORMAL:
                    case TCG_CALL_ARG_EXTEND_U:
                    case TCG_CALL_ARG_EXTEND_S:
                        if (loc->arg_slot
                            < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
                            tcg_regset_set_reg(*la_temp_pref(ts),
                                tcg_target_call_iarg_regs[loc->arg_slot]);
                        }
                        break;
                    default:
                        break;
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;
        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit.  */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end.  */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live.  */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;
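
            /*
             * Concrete example for the rewrite above: for an x86_64
             * guest running in 32-bit mode on a 32-bit host, a guest
             * add is emitted as add2_i32 producing (low, high); when
             * the high half is never used afterwards, the op is
             * reduced to a single add_i32 of the low parts.
             */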
        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            /* fall through */
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead.  */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD &&
                       have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live.  */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed.  */
                if (i < ARRAY_SIZE(op->output_pref)) {
                    op->output_pref[i] = *la_temp_pref(ts);
                }

                /* Output args are dead.  */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update.  */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_COND_BRANCH) {
                la_bb_sync(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type.  */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand.  */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward.  */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->regs;
                    if (ct->ialias) {
                        set &= output_pref(op, ct->alias_index);
                    }
                    /* If the combination is not possible, restart.  */
                    if (set == 0) {
                        set = ct->regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}
/* Liveness analysis: Convert indirect regs to direct temporaries.  */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global.  */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            dts->kind = TEMP_EBB;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead.  */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = tcg_call_flags(op);
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_COND_BRANCH) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts && arg_ts->state == TS_DEAD) {
                TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_ld_i32
                                  : INDEX_op_ld_i64);
                TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);

                lop->args[0] = temp_arg(dir_ts);
                lop->args[1] = temp_arg(arg_ts->mem_base);
                lop->args[2] = arg_ts->mem_offset;

                /* Loaded, but synced with memory.  */
                arg_ts->state = TS_MEM;
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[i] = temp_arg(dir_ts);
                changes = true;
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available.  */
        if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
            arg_ts = arg_temp(op->args[0]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[0] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified.  */
                arg_ts->state = 0;

                if (NEED_SYNC_ARG(0)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
                    TCGTemp *out_ts = dir_ts;

                    if (IS_DEAD_ARG(0)) {
                        out_ts = arg_temp(op->args[1]);
                        arg_ts->state = TS_DEAD;
                        tcg_op_remove(s, op);
                    } else {
                        arg_ts->state = TS_MEM;
                    }

                    sop->args[0] = temp_arg(out_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;
                } else {
                    tcg_debug_assert(!IS_DEAD_ARG(0));
                }
            }
        } else {
            for (i = 0; i < nb_oargs; i++) {
                arg_ts = arg_temp(op->args[i]);
                dir_ts = arg_ts->state_ptr;
                if (!dir_ts) {
                    continue;
                }
                op->args[i] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified.  */
                arg_ts->state = 0;

                /* Sync outputs upon their last write.  */
                if (NEED_SYNC_ARG(i)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);

                    sop->args[0] = temp_arg(dir_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;

                    arg_ts->state = TS_MEM;
                }
                /* Drop outputs that are dead.  */
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }
    }

    return changes;
}
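
/*
 * Sketch of the overall effect of liveness_pass_2, assuming a global g
 * that is marked indirect_reg: a use such as "add_i32 t0, g, t1"
 * becomes "ld_i32 g', g.mem_base, g.mem_offset; add_i32 t0, g', t1",
 * where g' is the TEMP_EBB temporary created above, and a matching
 * st_i32 is inserted after the last write of g.
 */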
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
    int size = tcg_type_size(ts->type);
    int align;
    intptr_t off;

    switch (ts->type) {
    case TCG_TYPE_I32:
        align = 4;
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        align = 8;
        break;
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* Note that we do not require aligned storage for V256. */
        align = 16;
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Assume the stack is sufficiently aligned.
     * This affects e.g. ARM NEON, where we have 8 byte stack alignment
     * and do not require 16 byte vector alignment.  This seems slightly
     * easier than fully parameterizing the above switch statement.
     */
    align = MIN(TCG_TARGET_STACK_ALIGN, align);
    off = ROUND_UP(s->current_frame_offset, align);

    /* If we've exhausted the stack frame, restart with a smaller TB. */
    if (off + size > s->frame_end) {
        tcg_raise_tb_overflow(s);
    }
    s->current_frame_offset = off + size;

    ts->mem_offset = off;
#if defined(__sparc__)
    ts->mem_offset += TCG_TARGET_STACK_BIAS;
#endif
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
}
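
/*
 * Worked example for the layout above, assuming an 8-byte aligned
 * TCG_TYPE_I64 temp and current_frame_offset == 20: the temp is placed
 * at off == ROUND_UP(20, 8) == 24 and current_frame_offset advances
 * to 32.
 */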
/* Assign @reg to @ts, and update reg_to_temp[]. */
static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
{
    if (ts->val_type == TEMP_VAL_REG) {
        TCGReg old = ts->reg;
        tcg_debug_assert(s->reg_to_temp[old] == ts);
        if (old == reg) {
            return;
        }
        s->reg_to_temp[old] = NULL;
    }
    tcg_debug_assert(s->reg_to_temp[reg] == NULL);
    s->reg_to_temp[reg] = ts;
    ts->val_type = TEMP_VAL_REG;
    ts->reg = reg;
}

/* Assign a non-register value type to @ts, and update reg_to_temp[]. */
static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
{
    tcg_debug_assert(type != TEMP_VAL_REG);
    if (ts->val_type == TEMP_VAL_REG) {
        TCGReg reg = ts->reg;
        tcg_debug_assert(s->reg_to_temp[reg] == ts);
        s->reg_to_temp[reg] = NULL;
    }
    ts->val_type = type;
}

static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);

/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead.  */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    TCGTempVal new_type;

    switch (ts->kind) {
    case TEMP_FIXED:
        return;
    case TEMP_GLOBAL:
    case TEMP_TB:
        new_type = TEMP_VAL_MEM;
        break;
    case TEMP_EBB:
        new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
        break;
    case TEMP_CONST:
        new_type = TEMP_VAL_CONST;
        break;
    default:
        g_assert_not_reached();
    }
    set_temp_val_nonreg(s, ts, new_type);
}

/* Mark a temporary as dead.  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}

/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    if (!temp_readonly(ts) && !ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly.  */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            g_assert_not_reached();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}

/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, 0, -1);
    }
}
/**
 * tcg_reg_alloc:
 * @required_regs: Set of registers in which we must allocate.
 * @allocated_regs: Set of registers which must be avoided.
 * @preferred_regs: Set of registers we should prefer.
 * @rev: True if we search the registers in "indirect" order.
 *
 * The allocated register must be in @required_regs & ~@allocated_regs,
 * but if we can put it in @preferred_regs we may save a move later.
 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
                            TCGRegSet allocated_regs,
                            TCGRegSet preferred_regs, bool rev)
{
    int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    reg_ct[1] = required_regs & ~allocated_regs;
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    /* Skip the preferred_regs option if it cannot be satisfied,
       or if the preference made no difference.  */
    f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* Try free registers, preferences first.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            if (s->reg_to_temp[reg] == NULL) {
                return reg;
            }
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (s->reg_to_temp[reg] == NULL &&
                    tcg_regset_test_reg(set, reg)) {
                    return reg;
                }
            }
        }
    }

    /* We must spill something.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (tcg_regset_test_reg(set, reg)) {
                    tcg_reg_free(s, reg, allocated_regs);
                    return reg;
                }
            }
        }
    }

    g_assert_not_reached();
}
static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
                                 TCGRegSet allocated_regs,
                                 TCGRegSet preferred_regs, bool rev)
{
    int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    /* Ensure that if I is not in allocated_regs, I+1 is not either.  */
    reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /*
     * Skip the preferred_regs option if it cannot be satisfied,
     * or if the preference made no difference.
     */
    k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    /*
     * Minimize the number of flushes by looking for 2 free registers first,
     * then a single flush, then two flushes.
     */
    for (fmin = 2; fmin >= 0; fmin--) {
        for (j = k; j < 2; j++) {
            TCGRegSet set = reg_ct[j];

            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];

                if (tcg_regset_test_reg(set, reg)) {
                    int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];

                    if (f >= fmin) {
                        tcg_reg_free(s, reg, allocated_regs);
                        tcg_reg_free(s, reg + 1, allocated_regs);
                        return reg;
                    }
                }
            }
        }
    }
    g_assert_not_reached();
}
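
/*
 * Example of the fmin search above (register names purely
 * illustrative): with candidate pairs (r2,r3) both free and (r0,r1)
 * half occupied, the fmin == 2 iteration picks (r2,r3) with no spill;
 * the fmin == 1 and fmin == 0 iterations are only reached when every
 * allowed pair would require one or two spills.
 */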
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        if (ts->type <= TCG_TYPE_I64) {
            tcg_out_movi(s, ts->type, reg, ts->val);
        } else {
            uint64_t val = ts->val;
            MemOp vece = MO_64;

            /*
             * Find the minimal vector element that matches the constant.
             * The targets will, in general, have to do this search anyway,
             * do this generically.
             */
            if (val == dup_const(MO_8, val)) {
                vece = MO_8;
            } else if (val == dup_const(MO_16, val)) {
                vece = MO_16;
            } else if (val == dup_const(MO_32, val)) {
                vece = MO_32;
            }

            tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
        }
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        g_assert_not_reached();
    }
    set_temp_val_reg(s, ts, reg);
}
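
/*
 * Example of the vector-constant search above: 0x7f7f7f7f7f7f7f7f
 * equals dup_const(MO_8, 0x7f), so it is emitted as a byte dup;
 * 0x00ff00ff00ff00ff first matches at MO_16; a value with no such
 * repetition falls through to a full MO_64 dupi.
 */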
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory. Keep a tcg_debug_assert for safety.  */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
}

/* save globals to their canonical location and assume they can be
   modified by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}

/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->kind == TEMP_FIXED
                         || ts->mem_coherent);
    }
}

/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];

        switch (ts->kind) {
        case TEMP_TB:
            temp_save(s, ts, allocated_regs);
            break;
        case TEMP_EBB:
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            /* Similarly, we should have freed any allocated register. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
            break;
        default:
            g_assert_not_reached();
        }
    }

    save_globals(s, allocated_regs);
}

/*
 * At a conditional branch, we assume all temporaries are dead unless
 * explicitly live-across-conditional-branch; all globals and local
 * temps are synced to their location.
 */
static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
{
    sync_globals(s, allocated_regs);

    for (int i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        /*
         * The liveness analysis already ensures that temps are dead.
         * Keep tcg_debug_asserts for safety.
         */
        switch (ts->kind) {
        case TEMP_TB:
            tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
            break;
        case TEMP_EBB:
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            break;
        default:
            g_assert_not_reached();
        }
    }
}
/*
 * Specialized code generation for INDEX_op_mov_* with a constant.
 */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    /* The movi is not explicitly generated here.  */
    set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}

/*
 * Specialized code generation for INDEX_op_mov_*.
 */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs, preferred_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;
    TCGReg oreg, ireg;

    allocated_regs = s->reserved_regs;
    preferred_regs = output_pref(op, 0);
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype],
                  allocated_regs, preferred_regs);
    }
    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    ireg = ts->reg;

    if (IS_DEAD_ARG(0)) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
        return;
    }

    if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
        /*
         * The mov can be suppressed.  Kill input first, so that it
         * is unlinked from reg_to_temp, then set the output to the
         * reg that we saved from the input.
         */
        temp_dead(s, ts);
        oreg = ireg;
    } else {
        if (ots->val_type == TEMP_VAL_REG) {
            oreg = ots->reg;
        } else {
            /* Make sure to not spill the input register during allocation. */
            oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                 allocated_regs | ((TCGRegSet)1 << ireg),
                                 preferred_regs, ots->indirect_base);
        }
        if (!tcg_out_mov(s, otype, oreg, ireg)) {
            /*
             * Cross register class move not supported.
             * Store the source register into the destination slot
             * and leave the destination temp as TEMP_VAL_MEM.
             */
            assert(!temp_readonly(ots));
            if (!ts->mem_allocated) {
                temp_allocate_frame(s, ots);
            }
            tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
            set_temp_val_nonreg(s, ts, TEMP_VAL_MEM);
            ots->mem_coherent = 1;
            return;
        }
    }
    set_temp_val_reg(s, ots, oreg);
    ots->mem_coherent = 0;

    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, allocated_regs, 0, 0);
    }
}
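
/*
 * Note on the register-reuse path above: for "mov t1, t0" where t0
 * dies at the mov and is not TEMP_FIXED, no host instruction is
 * emitted at all -- t0 is killed and t1 simply inherits t0's register.
 */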
/*
 * Specialized code generation for INDEX_op_dup_vec.
 */
static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet dup_out_regs, dup_in_regs;
    TCGTemp *its, *ots;
    TCGType itype, vtype;
    unsigned vece;
    int lowpart_ofs;
    bool ok;

    ots = arg_temp(op->args[0]);
    its = arg_temp(op->args[1]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    itype = its->type;
    vece = TCGOP_VECE(op);
    vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    if (its->val_type == TEMP_VAL_CONST) {
        /* Propagate constant via movi -> dupi.  */
        tcg_target_ulong val = its->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, its);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
        return;
    }

    dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
    dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;

    /* Allocate the output register now.  */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGReg oreg;

        if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
            /* Make sure to not spill the input register. */
            tcg_regset_set_reg(allocated_regs, its->reg);
        }
        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    switch (its->val_type) {
    case TEMP_VAL_REG:
        /*
         * The dup constraints must be broad, covering all possible VECE.
         * However, tcg_op_dup_vec() gets to see the VECE and we allow it
         * to fail, indicating that extra moves are required for that case.
         */
        if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
            if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
                goto done;
            }
            /* Try again from memory or a vector input register.  */
        }
        if (!its->mem_coherent) {
            /*
             * The input register is not synced, and so an extra store
             * would be required to use memory.  Attempt an integer-vector
             * register move first.  We do not have a TCGRegSet for this.
             */
            if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
                break;
            }
            /* Sync the temp back to its slot and load from there.  */
            temp_sync(s, its, s->reserved_regs, 0, 0);
        }
        /* fall through */

    case TEMP_VAL_MEM:
        lowpart_ofs = 0;
        if (HOST_BIG_ENDIAN) {
            lowpart_ofs = tcg_type_size(itype) - (1 << vece);
        }
        if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
                             its->mem_offset + lowpart_ofs)) {
            goto done;
        }
        /* Load the input into the destination vector register. */
        tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
        break;

    default:
        g_assert_not_reached();
    }

    /* We now have a vector input register, so dup must succeed. */
    ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
    tcg_debug_assert(ok);

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, its);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, 0);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, i_required_regs;
        bool allocate_new_reg, copyto_new_reg;
        TCGTemp *ts2;
        int i1, i2;

        i = def->args_ct[nb_oargs + k].sort_index;
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        reg = ts->reg;
        i_preferred_regs = 0;
        i_required_regs = arg_ct->regs;
        allocate_new_reg = false;
        copyto_new_reg = false;

        switch (arg_ct->pair) {
        case 0: /* not paired */
            if (arg_ct->ialias) {
                i_preferred_regs = output_pref(op, arg_ct->alias_index);

                /*
                 * If the input is readonly, then it cannot also be an
                 * output and aliased to itself.  If the input is not
                 * dead after the instruction, we must allocate a new
                 * register and move it.
                 */
                if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
                    allocate_new_reg = true;
                } else if (ts->val_type == TEMP_VAL_REG) {
                    /*
                     * Check if the current register has already been
                     * allocated for another input.
                     */
                    allocate_new_reg =
                        tcg_regset_test_reg(i_allocated_regs, reg);
                }
            }
            if (!allocate_new_reg) {
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                reg = ts->reg;
                allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
            }
            if (allocate_new_reg) {
                /*
                 * Allocate a new register matching the constraint
                 * and move the temporary register into it.
                 */
                temp_load(s, ts, tcg_target_available_regs[ts->type],
                          i_allocated_regs, 0);
                reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
                                    i_preferred_regs, ts->indirect_base);
                copyto_new_reg = true;
            }
            break;

        case 1:
            /* First of an input pair; if i1 == i2, the second is an output. */
            i1 = i;
            i2 = arg_ct->pair_index;
            ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;

            /*
             * It is easier to default to allocating a new pair
             * and to identify a few cases where it's not required.
             */
            if (arg_ct->ialias) {
                i_preferred_regs = output_pref(op, arg_ct->alias_index);
                if (IS_DEAD_ARG(i1) &&
                    IS_DEAD_ARG(i2) &&
                    !temp_readonly(ts) &&
                    ts->val_type == TEMP_VAL_REG &&
                    ts->reg < TCG_TARGET_NB_REGS - 1 &&
                    tcg_regset_test_reg(i_required_regs, reg) &&
                    !tcg_regset_test_reg(i_allocated_regs, reg) &&
                    !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
                    (ts2
                     ? ts2->val_type == TEMP_VAL_REG &&
                       ts2->reg == reg + 1 &&
                       !temp_readonly(ts2)
                     : s->reg_to_temp[reg + 1] == NULL)) {
                    break;
                }
            } else {
                /* Without aliasing, the pair must also be an input. */
                tcg_debug_assert(ts2);
                if (ts->val_type == TEMP_VAL_REG &&
                    ts2->val_type == TEMP_VAL_REG &&
                    ts2->reg == reg + 1 &&
                    tcg_regset_test_reg(i_required_regs, reg)) {
                    break;
                }
            }
            reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
                                     0, ts->indirect_base);
            goto do_pair;

        case 2: /* pair second */
            reg = new_args[arg_ct->pair_index] + 1;
            goto do_pair;

        case 3: /* ialias with second output, no first input */
            tcg_debug_assert(arg_ct->ialias);
            i_preferred_regs = output_pref(op, arg_ct->alias_index);

            if (IS_DEAD_ARG(i) &&
                !temp_readonly(ts) &&
                ts->val_type == TEMP_VAL_REG &&
                reg > 0 &&
                s->reg_to_temp[reg - 1] == NULL &&
                tcg_regset_test_reg(i_required_regs, reg) &&
                !tcg_regset_test_reg(i_allocated_regs, reg) &&
                !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
                tcg_regset_set_reg(i_allocated_regs, reg - 1);
                break;
            }
            reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
                                     i_allocated_regs, 0,
                                     ts->indirect_base);
            tcg_regset_set_reg(i_allocated_regs, reg);
            reg += 1;
            goto do_pair;

        do_pair:
            /*
             * If an aliased input is not dead after the instruction,
             * we must allocate a new register and move it.
             */
            if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
                TCGRegSet t_allocated_regs = i_allocated_regs;

                /*
                 * Because of the alias, and the continued life, make sure
                 * that the temp is somewhere *other* than the reg pair,
                 * and we get a copy in reg.
                 */
                tcg_regset_set_reg(t_allocated_regs, reg);
                tcg_regset_set_reg(t_allocated_regs, reg + 1);
                if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
                    /* If ts was already in reg, copy it somewhere else. */
                    TCGReg nr;
                    bool ok;

                    tcg_debug_assert(ts->kind != TEMP_FIXED);
                    nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                       t_allocated_regs, 0, ts->indirect_base);
                    ok = tcg_out_mov(s, ts->type, nr, reg);
                    tcg_debug_assert(ok);

                    set_temp_val_reg(s, ts, nr);
                } else {
                    temp_load(s, ts, tcg_target_available_regs[ts->type],
                              t_allocated_regs, 0);
                    copyto_new_reg = true;
                }
            } else {
                /* Preferably allocate to reg, otherwise copy. */
                i_required_regs = (TCGRegSet)1 << reg;
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                copyto_new_reg = ts->reg != reg;
            }
            break;

        default:
            g_assert_not_reached();
        }

        if (copyto_new_reg) {
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_COND_BRANCH) {
        tcg_reg_alloc_cbranch(s, i_allocated_regs);
    } else if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for (k = 0; k < nb_oargs; k++) {
            i = def->args_ct[k].sort_index;
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified.  */
            tcg_debug_assert(!temp_readonly(ts));

            switch (arg_ct->pair) {
            case 0: /* not paired */
                if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
                    reg = new_args[arg_ct->alias_index];
                } else if (arg_ct->newreg) {
                    reg = tcg_reg_alloc(s, arg_ct->regs,
                                        i_allocated_regs | o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                } else {
                    reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                }
                break;

            case 1: /* first of pair */
                tcg_debug_assert(!arg_ct->newreg);
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                    break;
                }
                reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
                                         output_pref(op, k),
                                         ts->indirect_base);
                break;

            case 2: /* second of pair */
                tcg_debug_assert(!arg_ct->newreg);
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                } else {
                    reg = new_args[arg_ct->pair_index] + 1;
                }
                break;

            case 3: /* first of pair, aliasing with a second input */
                tcg_debug_assert(!arg_ct->newreg);
                reg = new_args[arg_ct->pair_index] - 1;
                break;

            default:
                g_assert_not_reached();
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for (i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified.  */
        tcg_debug_assert(!temp_readonly(ts));

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGTemp *ots, *itsl, *itsh;
    TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
    tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
    tcg_debug_assert(TCGOP_VECE(op) == MO_64);

    ots = arg_temp(op->args[0]);
    itsl = arg_temp(op->args[1]);
    itsh = arg_temp(op->args[2]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    /* Allocate the output register now.  */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGRegSet dup_out_regs =
            tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
        TCGReg oreg;

        /* Make sure to not spill the input registers. */
        if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsl->reg);
        }
        if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsh->reg);
        }

        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    /* Promote dup2 of immediates to dupi_vec. */
    if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
        uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
        MemOp vece = MO_64;

        if (val == dup_const(MO_8, val)) {
            vece = MO_8;
        } else if (val == dup_const(MO_16, val)) {
            vece = MO_16;
        } else if (val == dup_const(MO_32, val)) {
            vece = MO_32;
        }

        tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
        goto done;
    }

    /* If the two inputs form one 64-bit value, try dupm_vec. */
    if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
        itsh->temp_subindex == !HOST_BIG_ENDIAN &&
        itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
        TCGTemp *its = itsl - HOST_BIG_ENDIAN;

        temp_sync(s, its + 0, s->reserved_regs, 0, 0);
        temp_sync(s, its + 1, s->reserved_regs, 0, 0);

        if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
                             its->mem_base->reg, its->mem_offset)) {
            goto done;
        }
    }

    /* Fall back to generic expansion. */
    return false;

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, itsl);
    }
    if (IS_DEAD_ARG(2)) {
        temp_dead(s, itsh);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
    return true;
}
static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    if (ts->val_type == TEMP_VAL_REG) {
        if (ts->reg != reg) {
            tcg_reg_free(s, reg, allocated_regs);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
    } else {
        TCGRegSet arg_set = 0;

        tcg_reg_free(s, reg, allocated_regs);
        tcg_regset_set_reg(arg_set, reg);
        temp_load(s, ts, arg_set, allocated_regs, 0);
    }
}

static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    /*
     * When the destination is on the stack, load up the temp and store.
     * If there are many call-saved registers, the temp might live to
     * see another use; otherwise it'll be discarded.
     */
    temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
    tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
               TCG_TARGET_CALL_STACK_OFFSET +
               stk_slot * sizeof(tcg_target_long));
}

static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
                            TCGTemp *ts, TCGRegSet *allocated_regs)
{
    if (l->arg_slot < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
        load_arg_reg(s, reg, ts, *allocated_regs);
        tcg_regset_set_reg(*allocated_regs, reg);
    } else {
        load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs),
                     ts, *allocated_regs);
    }
}
*s
, TCGOp
*op
)
4342 const int nb_oargs
= TCGOP_CALLO(op
);
4343 const int nb_iargs
= TCGOP_CALLI(op
);
4344 const TCGLifeData arg_life
= op
->life
;
4345 const TCGHelperInfo
*info
= tcg_call_info(op
);
4346 TCGRegSet allocated_regs
= s
->reserved_regs
;
4350 * Move inputs into place in reverse order,
4351 * so that we place stacked arguments first.
4353 for (i
= nb_iargs
- 1; i
>= 0; --i
) {
4354 const TCGCallArgumentLoc
*loc
= &info
->in
[i
];
4355 TCGTemp
*ts
= arg_temp(op
->args
[nb_oargs
+ i
]);
4357 switch (loc
->kind
) {
4358 case TCG_CALL_ARG_NORMAL
:
4359 case TCG_CALL_ARG_EXTEND_U
:
4360 case TCG_CALL_ARG_EXTEND_S
:
4361 load_arg_normal(s
, loc
, ts
, &allocated_regs
);
4364 g_assert_not_reached();
4368 /* Mark dead temporaries and free the associated registers. */
4369 for (i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
4370 if (IS_DEAD_ARG(i
)) {
4371 temp_dead(s
, arg_temp(op
->args
[i
]));
4375 /* Clobber call registers. */
4376 for (i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
4377 if (tcg_regset_test_reg(tcg_target_call_clobber_regs
, i
)) {
4378 tcg_reg_free(s
, i
, allocated_regs
);
4383 * Save globals if they might be written by the helper,
4384 * sync them if they might be read.
4386 if (info
->flags
& TCG_CALL_NO_READ_GLOBALS
) {
4388 } else if (info
->flags
& TCG_CALL_NO_WRITE_GLOBALS
) {
4389 sync_globals(s
, allocated_regs
);
4391 save_globals(s
, allocated_regs
);
4394 tcg_out_call(s
, tcg_call_func(op
), info
);
4396 /* Assign output registers and emit moves if needed. */
4397 switch (info
->out_kind
) {
4398 case TCG_CALL_RET_NORMAL
:
4399 for (i
= 0; i
< nb_oargs
; i
++) {
4400 TCGTemp
*ts
= arg_temp(op
->args
[i
]);
4401 TCGReg reg
= tcg_target_call_oarg_regs
[i
];
4403 /* ENV should not be modified. */
4404 tcg_debug_assert(!temp_readonly(ts
));
4406 set_temp_val_reg(s
, ts
, reg
);
4407 ts
->mem_coherent
= 0;
4411 g_assert_not_reached();
4414 /* Flush or discard output registers as needed. */
4415 for (i
= 0; i
< nb_oargs
; i
++) {
4416 TCGTemp
*ts
= arg_temp(op
->args
[i
]);
4417 if (NEED_SYNC_ARG(i
)) {
4418 temp_sync(s
, ts
, s
->reserved_regs
, 0, IS_DEAD_ARG(i
));
4419 } else if (IS_DEAD_ARG(i
)) {
4425 #ifdef CONFIG_PROFILER
4427 /* avoid copy/paste errors */
4428 #define PROF_ADD(to, from, field) \
4430 (to)->field += qatomic_read(&((from)->field)); \
4433 #define PROF_MAX(to, from, field) \
4435 typeof((from)->field) val__ = qatomic_read(&((from)->field)); \
4436 if (val__ > (to)->field) { \
4437 (to)->field = val__; \
4441 /* Pass in a zero'ed @prof */
4443 void tcg_profile_snapshot(TCGProfile
*prof
, bool counters
, bool table
)
4445 unsigned int n_ctxs
= qatomic_read(&tcg_cur_ctxs
);
4448 for (i
= 0; i
< n_ctxs
; i
++) {
4449 TCGContext
*s
= qatomic_read(&tcg_ctxs
[i
]);
4450 const TCGProfile
*orig
= &s
->prof
;
4453 PROF_ADD(prof
, orig
, cpu_exec_time
);
4454 PROF_ADD(prof
, orig
, tb_count1
);
4455 PROF_ADD(prof
, orig
, tb_count
);
4456 PROF_ADD(prof
, orig
, op_count
);
4457 PROF_MAX(prof
, orig
, op_count_max
);
4458 PROF_ADD(prof
, orig
, temp_count
);
4459 PROF_MAX(prof
, orig
, temp_count_max
);
4460 PROF_ADD(prof
, orig
, del_op_count
);
4461 PROF_ADD(prof
, orig
, code_in_len
);
4462 PROF_ADD(prof
, orig
, code_out_len
);
4463 PROF_ADD(prof
, orig
, search_out_len
);
4464 PROF_ADD(prof
, orig
, interm_time
);
4465 PROF_ADD(prof
, orig
, code_time
);
4466 PROF_ADD(prof
, orig
, la_time
);
4467 PROF_ADD(prof
, orig
, opt_time
);
4468 PROF_ADD(prof
, orig
, restore_count
);
4469 PROF_ADD(prof
, orig
, restore_time
);
4474 for (i
= 0; i
< NB_OPS
; i
++) {
4475 PROF_ADD(prof
, orig
, table_op_count
[i
]);
4484 static void tcg_profile_snapshot_counters(TCGProfile
*prof
)
4486 tcg_profile_snapshot(prof
, true, false);
4489 static void tcg_profile_snapshot_table(TCGProfile
*prof
)
4491 tcg_profile_snapshot(prof
, false, true);
4494 void tcg_dump_op_count(GString
*buf
)
4496 TCGProfile prof
= {};
4499 tcg_profile_snapshot_table(&prof
);
4500 for (i
= 0; i
< NB_OPS
; i
++) {
4501 g_string_append_printf(buf
, "%s %" PRId64
"\n", tcg_op_defs
[i
].name
,
4502 prof
.table_op_count
[i
]);
4506 int64_t tcg_cpu_exec_time(void)
4508 unsigned int n_ctxs
= qatomic_read(&tcg_cur_ctxs
);
4512 for (i
= 0; i
< n_ctxs
; i
++) {
4513 const TCGContext
*s
= qatomic_read(&tcg_ctxs
[i
]);
4514 const TCGProfile
*prof
= &s
->prof
;
4516 ret
+= qatomic_read(&prof
->cpu_exec_time
);
4521 void tcg_dump_op_count(GString
*buf
)
4523 g_string_append_printf(buf
, "[TCG profiler not compiled]\n");
4526 int64_t tcg_cpu_exec_time(void)
4528 error_report("%s: TCG profiler not compiled", __func__
);
4534 int tcg_gen_code(TCGContext
*s
, TranslationBlock
*tb
, target_ulong pc_start
)
4536 #ifdef CONFIG_PROFILER
4537 TCGProfile
*prof
= &s
->prof
;
4542 #ifdef CONFIG_PROFILER
4546 QTAILQ_FOREACH(op
, &s
->ops
, link
) {
4549 qatomic_set(&prof
->op_count
, prof
->op_count
+ n
);
4550 if (n
> prof
->op_count_max
) {
4551 qatomic_set(&prof
->op_count_max
, n
);
4555 qatomic_set(&prof
->temp_count
, prof
->temp_count
+ n
);
4556 if (n
> prof
->temp_count_max
) {
4557 qatomic_set(&prof
->temp_count_max
, n
);
4563 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
)
4564 && qemu_log_in_addr_range(pc_start
))) {
4565 FILE *logfile
= qemu_log_trylock();
4567 fprintf(logfile
, "OP:\n");
4568 tcg_dump_ops(s
, logfile
, false);
4569 fprintf(logfile
, "\n");
4570 qemu_log_unlock(logfile
);
4575 #ifdef CONFIG_DEBUG_TCG
4576 /* Ensure all labels referenced have been emitted. */
4581 QSIMPLEQ_FOREACH(l
, &s
->labels
, next
) {
4582 if (unlikely(!l
->present
) && l
->refs
) {
4583 qemu_log_mask(CPU_LOG_TB_OP
,
4584 "$L%d referenced but not present.\n", l
->id
);
4592 #ifdef CONFIG_PROFILER
4593 qatomic_set(&prof
->opt_time
, prof
->opt_time
- profile_getclock());
4596 #ifdef USE_TCG_OPTIMIZATIONS
4600 #ifdef CONFIG_PROFILER
4601 qatomic_set(&prof
->opt_time
, prof
->opt_time
+ profile_getclock());
4602 qatomic_set(&prof
->la_time
, prof
->la_time
- profile_getclock());
4605 reachable_code_pass(s
);
4608 if (s
->nb_indirects
> 0) {
4610 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND
)
4611 && qemu_log_in_addr_range(pc_start
))) {
4612 FILE *logfile
= qemu_log_trylock();
4614 fprintf(logfile
, "OP before indirect lowering:\n");
4615 tcg_dump_ops(s
, logfile
, false);
4616 fprintf(logfile
, "\n");
4617 qemu_log_unlock(logfile
);
4621 /* Replace indirect temps with direct temps. */
4622 if (liveness_pass_2(s
)) {
4623 /* If changes were made, re-run liveness. */
4628 #ifdef CONFIG_PROFILER
4629 qatomic_set(&prof
->la_time
, prof
->la_time
+ profile_getclock());
4633 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT
)
4634 && qemu_log_in_addr_range(pc_start
))) {
4635 FILE *logfile
= qemu_log_trylock();
4637 fprintf(logfile
, "OP after optimization and liveness analysis:\n");
4638 tcg_dump_ops(s
, logfile
, true);
4639 fprintf(logfile
, "\n");
4640 qemu_log_unlock(logfile
);

    /* Initialize goto_tb jump offsets. */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

    tcg_reg_alloc_start(s);

    /*
     * Reset the buffer pointers when restarting after overflow.
     * TODO: Move this into translate-all.c with the rest of the
     * buffer management.  Having only this done here is confusing.
     */
    s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
    s->code_ptr = s->code_buf;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_dup_vec:
            tcg_reg_alloc_dup(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset.  */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]));
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        case INDEX_op_dup2_vec:
            if (tcg_reg_alloc_dup2(s, op)) {
                break;
            }
            /* fall through */
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off.  */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

#ifndef CONFIG_TCG_INTERPRETER
    /* flush instruction cache */
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf,
                        tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
#endif

    return tcg_current_code_size(s);
}

#ifdef CONFIG_PROFILER
void tcg_dump_info(GString *buf)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    g_string_append_printf(buf, "JIT cycles          %" PRId64
                           " (%0.3f s at 2.4 GHz)\n",
                           tot, tot / 2.4e9);
    g_string_append_printf(buf, "translated TBs      %" PRId64
                           " (aborted=%" PRId64 " %0.1f%%)\n",
                           tb_count, s->tb_count1 - tb_count,
                           (double)(s->tb_count1 - s->tb_count)
                           / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    g_string_append_printf(buf, "avg ops/TB          %0.1f max=%d\n",
                           (double)s->op_count / tb_div_count, s->op_count_max);
    g_string_append_printf(buf, "deleted ops/TB      %0.2f\n",
                           (double)s->del_op_count / tb_div_count);
    g_string_append_printf(buf, "avg temps/TB        %0.2f max=%d\n",
                           (double)s->temp_count / tb_div_count,
                           s->temp_count_max);
    g_string_append_printf(buf, "avg host code/TB    %0.1f\n",
                           (double)s->code_out_len / tb_div_count);
    g_string_append_printf(buf, "avg search data/TB  %0.1f\n",
                           (double)s->search_out_len / tb_div_count);

    g_string_append_printf(buf, "cycles/op           %0.1f\n",
                           s->op_count ? (double)tot / s->op_count : 0);
    g_string_append_printf(buf, "cycles/in byte      %0.1f\n",
                           s->code_in_len ? (double)tot / s->code_in_len : 0);
    g_string_append_printf(buf, "cycles/out byte     %0.1f\n",
                           s->code_out_len ? (double)tot / s->code_out_len : 0);
    g_string_append_printf(buf, "cycles/search byte     %0.1f\n",
                           s->search_out_len ?
                           (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    g_string_append_printf(buf, "  gen_interm time   %0.1f%%\n",
                           (double)s->interm_time / tot * 100.0);
    g_string_append_printf(buf, "  gen_code time     %0.1f%%\n",
                           (double)s->code_time / tot * 100.0);
    g_string_append_printf(buf, "optim./code time    %0.1f%%\n",
                           (double)s->opt_time / (s->code_time ?
                                                  s->code_time : 1)
                           * 100.0);
    g_string_append_printf(buf, "liveness/code time  %0.1f%%\n",
                           (double)s->la_time / (s->code_time ?
                                                 s->code_time : 1) * 100.0);
    g_string_append_printf(buf, "cpu_restore count   %" PRId64 "\n",
                           s->restore_count);
    g_string_append_printf(buf, "  avg cycles        %0.1f\n",
                           s->restore_count ?
                           (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}
#endif

#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */

typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
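
/*
 * The handshake, per the GDB JIT interface documentation: GDB sets a
 * breakpoint inside __jit_debug_register_code(); when the descriptor
 * is updated and the function is called, GDB wakes up, follows
 * relevant_entry to the in-memory ELF image, and reads symbols and
 * unwind info from it as if it had loaded a shared object.
 */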

/* End GDB interface.  */

static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
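
/*
 * For example, with the .str layout used in the template below,
 * "\0" ".text\0" ".debug_info\0" ...:
 *
 *     find_string(img->str, ".text")       == 1
 *     find_string(img->str, ".debug_info") == 7
 *
 * i.e. the byte offset of the string within .strtab, which is the form
 * that sh_name and st_name expect.  Note there is no terminating
 * condition: the string searched for must be present.
 */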

static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities.  */
    {
        g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
        FILE *f = fopen(jit, "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite.  */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
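
/*
 * Once registered, an attached debugger can resolve host PCs inside
 * the JIT buffer: in gdb, "info symbol <addr>" should report
 * code_gen_buffer, and backtraces can unwind through generated code
 * using the .debug_frame data copied in above.
 */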

#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier.  */

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(const void *buf, size_t buf_size)
{
}

#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif