/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/translation-block.h"
#include "exec/tlb-common.h"
#include "tcg/startup.h"
#include "tcg/tcg-op-common.h"
#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "tcg/perf.h"
#ifdef CONFIG_USER_ONLY
#include "exec/user/guest-base.h"
#endif
/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
typedef struct TCGLabelQemuLdst {
    bool is_ld;             /* qemu_ld: true, qemu_st: false */
    MemOpIdx oi;
    TCGType type;           /* result type of a load */
    TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
    TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
    TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
    TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
    const tcg_insn_unit *raddr;   /* addr of the next IR of qemu_ld/st IR */
    tcg_insn_unit *label_ptr[2];  /* label pointers to be updated */
    QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
} TCGLabelQemuLdst;
static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_tb_start(TCGContext *s);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

#ifndef CONFIG_USER_ONLY
#define guest_base  ({ qemu_build_not_reached(); (uintptr_t)0; })
#endif
typedef struct TCGLdstHelperParam {
    TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg);
    unsigned ntmp;
    int tmp[3];
} TCGLdstHelperParam;

static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *l,
                                  bool load_sign, const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = {
    [MO_UB] = helper_ldub_mmu,
    [MO_SB] = helper_ldsb_mmu,
    [MO_UW] = helper_lduw_mmu,
    [MO_SW] = helper_ldsw_mmu,
    [MO_UL] = helper_ldul_mmu,
    [MO_UQ] = helper_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_ldsl_mmu,
    [MO_128] = helper_ld16_mmu,
#endif
};

static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
    [MO_8]  = helper_stb_mmu,
    [MO_16] = helper_stw_mmu,
    [MO_32] = helper_stl_mmu,
    [MO_64] = helper_stq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_128] = helper_st16_mmu,
#endif
};
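
/*
 * Example of how a backend indexes these tables (a sketch; the emitter
 * name is hypothetical, but get_memop() and the MO_* masks are the
 * real accessors):
 *
 *     MemOp opc = get_memop(oi);
 *     tcg_out_call_slow_path(s, qemu_ld_helpers[opc & MO_SSIZE]);
 */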
typedef struct {
    MemOp atom;   /* lg2 bits of atomicity required */
    MemOp align;  /* lg2 bits of alignment to use */
} TCGAtomAlign;

static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
    __attribute__((unused));

#ifdef CONFIG_USER_ONLY
bool tcg_use_softmmu;
#endif
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;

const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
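
/*
 * Example: a backend whose tcg_insn_unit is 4 bytes emits one
 * instruction word per call.  A sketch using the RISC-V NOP encoding
 * (addi x0, x0, 0); the wrapper function itself is hypothetical:
 *
 *     static void tcg_out_nop(TCGContext *s)
 *     {
 *         tcg_out32(s, 0x00000013);
 *     }
 */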
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->branches);
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}
static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
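
/*
 * Forward-branch lifecycle, as seen from a backend (a sketch):
 *  1. gen_new_label() returns a label with no value and an empty
 *     reloc list.
 *  2. A branch to the still-unresolved label records itself with
 *     tcg_out_reloc(s, s->code_ptr, <reloc type>, l, <addend>) and
 *     emits a placeholder instruction.
 *  3. tcg_out_label() binds the label to the current output point.
 *  4. tcg_resolve_relocs() walks each label's reloc list and lets
 *     patch_reloc() rewrite the placeholders; a false return reports
 *     an out-of-range displacement to the caller.
 */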
static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}

static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}

static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
{
    /*
     * Return the read-execute version of the pointer, for the benefit
     * of any pc-relative addressing mode.
     */
    return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}

static int __attribute__((unused))
tlb_mask_table_ofs(TCGContext *s, int which)
{
    return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
            sizeof(CPUNegativeOffsetState));
}
/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}
/*
 * Used by tcg_out_movext{1,2} to hold the arguments for tcg_out_movext.
 * By the time we arrive at tcg_out_movext1, @dst is always a TCGReg.
 *
 * However, tcg_out_helper_load_slots reuses this field to hold an
 * argument slot number (which may designate an argument register or an
 * argument stack slot), converting to TCGReg once all arguments that
 * are destined for the stack are processed.
 */
typedef struct TCGMovExtend {
    unsigned dst;
    TCGReg src;
    TCGType dst_type;
    TCGType src_type;
    MemOp src_ext;
} TCGMovExtend;
/**
 * tcg_out_movext -- move and extend
 * @s: tcg context
 * @dst_type: integral type for destination
 * @dst: destination register
 * @src_type: integral type for source
 * @src_ext: extension to apply to source
 * @src: source register
 *
 * Move or extend @src into @dst, depending on @src_ext and the types.
 */
static void tcg_out_movext(TCGContext *s, TCGType dst_type, TCGReg dst,
                           TCGType src_type, MemOp src_ext, TCGReg src)
{
    switch (src_ext) {
    case MO_UB:
        tcg_out_ext8u(s, dst, src);
        break;
    case MO_SB:
        tcg_out_ext8s(s, dst_type, dst, src);
        break;
    case MO_UW:
        tcg_out_ext16u(s, dst, src);
        break;
    case MO_SW:
        tcg_out_ext16s(s, dst_type, dst, src);
        break;
    case MO_UL:
    case MO_SL:
        if (dst_type == TCG_TYPE_I32) {
            if (src_type == TCG_TYPE_I32) {
                tcg_out_mov(s, TCG_TYPE_I32, dst, src);
            } else {
                tcg_out_extrl_i64_i32(s, dst, src);
            }
        } else if (src_type == TCG_TYPE_I32) {
            if (src_ext & MO_SIGN) {
                tcg_out_exts_i32_i64(s, dst, src);
            } else {
                tcg_out_extu_i32_i64(s, dst, src);
            }
        } else {
            if (src_ext & MO_SIGN) {
                tcg_out_ext32s(s, dst, src);
            } else {
                tcg_out_ext32u(s, dst, src);
            }
        }
        break;
    case MO_UQ:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        if (dst_type == TCG_TYPE_I32) {
            tcg_out_extrl_i64_i32(s, dst, src);
        } else {
            tcg_out_mov(s, TCG_TYPE_I64, dst, src);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
/* Minor variations on a theme, using a structure. */
static void tcg_out_movext1_new_src(TCGContext *s, const TCGMovExtend *i,
                                    TCGReg src)
{
    tcg_out_movext(s, i->dst_type, i->dst, i->src_type, i->src_ext, src);
}

static void tcg_out_movext1(TCGContext *s, const TCGMovExtend *i)
{
    tcg_out_movext1_new_src(s, i, i->src);
}
/**
 * tcg_out_movext2 -- move and extend two pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for both @i1 and @i2, caring for overlap
 * between the sources and destinations.
 */

static void tcg_out_movext2(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;

    if (i1->dst != src2) {
        tcg_out_movext1(s, i1);
        tcg_out_movext1(s, i2);
        return;
    }
    if (i2->dst == src1) {
        TCGType src1_type = i1->src_type;
        TCGType src2_type = i2->src_type;

        if (tcg_out_xchg(s, MAX(src1_type, src2_type), src1, src2)) {
            /* The data is now in the correct registers, now extend. */
            src1 = i2->src;
            src2 = i1->src;
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, src1_type, scratch, src1);
            src1 = scratch;
        }
    }
    tcg_out_movext1_new_src(s, i2, src2);
    tcg_out_movext1_new_src(s, i1, src1);
}
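
/*
 * Worked example: with i1 = {dst: R0, src: R1} and i2 = {dst: R1,
 * src: R0}, the two moves form a swap, so emitting i1 first would
 * clobber i2's source.  The code above then either uses a host xchg
 * instruction, or spills src1 into @scratch and extends from there.
 */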
/**
 * tcg_out_movext3 -- move and extend three pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @i3: third move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for all of @i1, @i2 and @i3, caring for overlap
 * between the sources and destinations.
 */

static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, const TCGMovExtend *i3,
                            int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;
    TCGReg src3 = i3->src;

    if (i1->dst != src2 && i1->dst != src3) {
        tcg_out_movext1(s, i1);
        tcg_out_movext2(s, i2, i3, scratch);
        return;
    }
    if (i2->dst != src1 && i2->dst != src3) {
        tcg_out_movext1(s, i2);
        tcg_out_movext2(s, i1, i3, scratch);
        return;
    }
    if (i3->dst != src1 && i3->dst != src2) {
        tcg_out_movext1(s, i3);
        tcg_out_movext2(s, i1, i2, scratch);
        return;
    }

    /*
     * There is a cycle.  Since there are only 3 nodes, the cycle is
     * either "clockwise" or "anti-clockwise", and can be solved with
     * a single scratch or two xchg.
     */
    if (i1->dst == src2 && i2->dst == src3 && i3->dst == src1) {
        /* "Clockwise" */
        if (tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2)) {
            tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i3);
            tcg_out_movext1(s, i2);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else if (i1->dst == src3 && i2->dst == src1 && i3->dst == src2) {
        /* "Anti-clockwise" */
        if (tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3)) {
            tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i2);
            tcg_out_movext1(s, i3);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else {
        g_assert_not_reached();
    }
}
#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),
#define C_N1O1_I1(O1, O2, I1)           C_PFX3(c_n1o1_i1_, O1, O2, I1),
#define C_N2_I1(O1, O2, I1)             C_PFX3(c_n2_i1_, O1, O2, I1),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) \
    C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_N1O1_I1
#undef C_N2_I1
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4
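
/*
 * Example expansion: C_O1_I2(r, r, ri) pastes into the enumerator
 * c_o1_i2_r_r_ri via C_PFX3(c_o1_i2_, r, r, ri), so every distinct
 * output/input constraint combination used by tcg-target-con-set.h
 * becomes exactly one TCGConstraintSetIndex value, indexing the
 * constraint_sets[] array below.
 */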
/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },
#define C_N1O1_I1(O1, O2, I1)           { .args_ct_str = { "&" #O1, #O2, #I1 } },
#define C_N2_I1(O1, O2, I1)             { .args_ct_str = { "&" #O1, "&" #O2, #I1 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) \
    { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_N1O1_I1
#undef C_N2_I1
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4
/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)
#define C_N1O1_I1(O1, O2, I1)           C_PFX3(c_n1o1_i1_, O1, O2, I1)
#define C_N2_I1(O1, O2, I1)             C_PFX3(c_n2_i1_, O1, O2, I1)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) \
    C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"
#ifndef CONFIG_TCG_INTERPRETER
/* Validate CPUTLBDescFast placement. */
QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
                        sizeof(CPUNegativeOffsetState))
                  < MIN_TLB_MASK_TABLE_OFS);
#endif
static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In system-mode each caller registers its context in tcg_ctxs[]. Note that in
 * system-mode tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in system-mode keeps code that
 * iterates over the array (e.g. tcg_code_size()) the same for both system/user
 * modes.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;

    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
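
/*
 * Usage sketch: per-translation data is bump-allocated via tcg_malloc()
 * and released wholesale rather than individually:
 *
 *     TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
 *     ...
 *     tcg_pool_reset(s);   // frees large chunks, recycles normal ones
 */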
/*
 * Create TCGHelperInfo structures for "tcg/tcg-ldst.h" functions,
 * akin to what "exec/helper-tcg.h" does with DEF_HELPER_FLAGS_N.
 * We only use these for layout in tcg_out_ld_helper_ret and
 * tcg_out_st_helper_args, and share them between several of
 * the helpers, with the end result that it's easier to build manually.
 */

#if TCG_TARGET_REG_BITS == 32
# define dh_typecode_ttl  dh_typecode_i32
#else
# define dh_typecode_ttl  dh_typecode_i64
#endif
static TCGHelperInfo info_helper_ld32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(ttl, 0)   /* return tcg_target_ulong */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)   /* uint64_t addr */
              | dh_typemask(i32, 3)   /* unsigned oi */
              | dh_typemask(ptr, 4)   /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i64, 0)   /* return uint64_t */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)   /* uint64_t addr */
              | dh_typemask(i32, 3)   /* unsigned oi */
              | dh_typemask(ptr, 4)   /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i128, 0)  /* return Int128 */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)   /* uint64_t addr */
              | dh_typemask(i32, 3)   /* unsigned oi */
              | dh_typemask(ptr, 4)   /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)   /* uint64_t addr */
              | dh_typemask(i32, 3)   /* uint32_t data */
              | dh_typemask(i32, 4)   /* unsigned oi */
              | dh_typemask(ptr, 5)   /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)   /* uint64_t addr */
              | dh_typemask(i64, 3)   /* uint64_t data */
              | dh_typemask(i32, 4)   /* unsigned oi */
              | dh_typemask(ptr, 5)   /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)   /* uint64_t addr */
              | dh_typemask(i128, 3)  /* Int128 data */
              | dh_typemask(i32, 4)   /* unsigned oi */
              | dh_typemask(ptr, 5)   /* uintptr_t ra */
};
#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    /*
     * libffi does not support __int128_t, so we have forced Int128
     * to use the structure definition instead of the builtin type.
     */
    static ffi_type *ffi_type_i128_elements[3] = {
        &ffi_type_uint64,
        &ffi_type_uint64,
        NULL
    };
    static ffi_type ffi_type_i128 = {
        .size = 16,
        .alignment = __alignof__(Int128),
        .type = FFI_TYPE_STRUCT,
        .elements = ffi_type_i128_elements,
    };

    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    case dh_typecode_i128:
        return &ffi_type_i128;
    }
    g_assert_not_reached();
}
static ffi_cif *init_ffi_layout(TCGHelperInfo *info)
{
    unsigned typemask = info->typemask;
    struct {
        ffi_cif cif;
        ffi_type *args[];
    } *ca;
    ffi_status status;
    int nargs;

    /* Ignoring the return type, find the last non-zero field. */
    nargs = 32 - clz32(typemask >> 3);
    nargs = DIV_ROUND_UP(nargs, 3);
    assert(nargs <= MAX_CALL_IARGS);

    ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
    ca->cif.rtype = typecode_to_ffi(typemask & 7);
    ca->cif.nargs = nargs;

    if (nargs != 0) {
        ca->cif.arg_types = ca->args;
        for (int j = 0; j < nargs; ++j) {
            int typecode = extract32(typemask, (j + 1) * 3, 3);
            ca->args[j] = typecode_to_ffi(typecode);
        }
    }

    status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                          ca->cif.rtype, ca->cif.arg_types);
    assert(status == FFI_OK);

    return &ca->cif;
}
#define HELPER_INFO_INIT(I)      (&(I)->cif)
#define HELPER_INFO_INIT_VAL(I)  init_ffi_layout(I)
#else
#define HELPER_INFO_INIT(I)      (&(I)->init)
#define HELPER_INFO_INIT_VAL(I)  1
#endif /* CONFIG_TCG_INTERPRETER */
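
/*
 * Worked example of the typemask arithmetic in init_ffi_layout():
 * each typecode is 3 bits, with the return type in bits [2:0] and
 * argument i in bits [3*(i+1)+2 : 3*(i+1)].  For a helper with four
 * arguments, the last non-zero field of 'typemask >> 3' ends somewhere
 * in bits 9..11, so 32 - clz32(...) yields a value in 10..12 and
 * DIV_ROUND_UP(..., 3) recovers nargs = 4.
 */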
static inline bool arg_slot_reg_p(unsigned arg_slot)
{
    /*
     * Split the sizeof away from the comparison to avoid Werror from
     * "unsigned < 0 is always false", when iarg_regs is empty.
     */
    unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
    return arg_slot < nreg;
}

static inline int arg_slot_stk_ofs(unsigned arg_slot)
{
    unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);

    tcg_debug_assert(stk_slot < max);
    return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
}
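
/*
 * Example: with 6 integer argument registers and an 8-byte
 * tcg_target_long (hypothetical values; the real ones come from the
 * backend's tcg-target.h), arg_slot 8 maps to stk_slot 2, i.e. an
 * offset of TCG_TARGET_CALL_STACK_OFFSET + 16 bytes.
 */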
typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;

static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}
static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}

static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}
static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
    int n = 128 / TCG_TARGET_REG_BITS;

    /* The first subindex carries the pointer. */
    layout_arg_1(cum, info, TCG_CALL_ARG_BY_REF);

    /*
     * The callee is allowed to clobber memory associated with
     * structure pass by-reference.  Therefore we must make copies.
     * Allocate space from "ref_slot", which will be adjusted to
     * follow the parameters on the stack.
     */
    loc[0].ref_slot = cum->ref_slot;

    /*
     * Subsequent words also go into the reference slot, but
     * do not accumulate into the regular arguments.
     */
    for (int i = 1; i < n; ++i) {
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_BY_REF_N,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .ref_slot = cum->ref_slot + i,
        };
    }
    cum->info_in_idx += n - 1;  /* i=0 accounted for in layout_arg_1 */
    cum->ref_slot += n;
}
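
/*
 * Example: an Int128 argument on a 64-bit host with
 * TCG_CALL_ARG_BY_REF gives n = 2: subindex 0 is an ordinary pointer
 * argument aimed at the stack copy, while subindex 1 merely reserves
 * the second ref_slot word of that copy (TCG_CALL_ARG_BY_REF_N).
 */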
static void init_call_layout(TCGHelperInfo *info)
{
    int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
    int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned typemask = info->typemask;
    unsigned typecode;
    TCGCumulativeArgs cum = { };

    /*
     * Parse and place any function return value.
     */
    typecode = typemask & 7;
    switch (typecode) {
    case dh_typecode_void:
        info->nr_out = 0;
        break;
    case dh_typecode_i32:
    case dh_typecode_s32:
    case dh_typecode_ptr:
        info->nr_out = 1;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    case dh_typecode_i64:
    case dh_typecode_s64:
        info->nr_out = 64 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_CALL_RET_NORMAL;
        /* Query the last register now to trigger any assert early. */
        tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
        break;
    case dh_typecode_i128:
        info->nr_out = 128 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_TARGET_CALL_RET_I128;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            /* Query the last register now to trigger any assert early. */
            tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
            break;
        case TCG_CALL_RET_BY_VEC:
            /* Query the single register now to trigger any assert early. */
            tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0);
            break;
        case TCG_CALL_RET_BY_REF:
            /*
             * Allocate the first argument to the output.
             * We don't need to store this anywhere, just make it
             * unavailable for use in the input loop below.
             */
            cum.arg_slot = 1;
            break;
        default:
            qemu_build_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Parse and place function arguments.
     */
    for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
        TCGCallArgumentKind kind;
        TCGType type;

        typecode = typemask & 7;
        switch (typecode) {
        case dh_typecode_i32:
        case dh_typecode_s32:
            type = TCG_TYPE_I32;
            break;
        case dh_typecode_i64:
        case dh_typecode_s64:
            type = TCG_TYPE_I64;
            break;
        case dh_typecode_ptr:
            type = TCG_TYPE_PTR;
            break;
        case dh_typecode_i128:
            type = TCG_TYPE_I128;
            break;
        default:
            g_assert_not_reached();
        }

        switch (type) {
        case TCG_TYPE_I32:
            switch (TCG_TARGET_CALL_ARG_I32) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                break;
            case TCG_CALL_ARG_EXTEND:
                kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
                layout_arg_1(&cum, info, kind);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I64:
            switch (TCG_TARGET_CALL_ARG_I64) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                if (TCG_TARGET_REG_BITS == 32) {
                    layout_arg_normal_n(&cum, info, 2);
                } else {
                    layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                }
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I128:
            switch (TCG_TARGET_CALL_ARG_I128) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_normal_n(&cum, info, 128 / TCG_TARGET_REG_BITS);
                break;
            case TCG_CALL_ARG_BY_REF:
                layout_arg_by_ref(&cum, info);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    info->nr_in = cum.info_in_idx;

    /* Validate that we didn't overrun the input array. */
    assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
    /* Validate the backend has enough argument space. */
    assert(cum.arg_slot <= max_reg_slots + max_stk_slots);

    /*
     * Relocate the "ref_slot" area to the end of the parameters.
     * Minimizing this stack offset helps code size for x86,
     * which has a signed 8-bit offset encoding.
     */
    if (cum.ref_slot != 0) {
        int ref_base = 0;

        if (cum.arg_slot > max_reg_slots) {
            int align = __alignof(Int128) / sizeof(tcg_target_long);

            ref_base = cum.arg_slot - max_reg_slots;
            if (align > 1) {
                ref_base = ROUND_UP(ref_base, align);
            }
        }
        assert(ref_base + cum.ref_slot <= max_stk_slots);
        ref_base += max_reg_slots;

        if (ref_base != 0) {
            for (int i = cum.info_in_idx - 1; i >= 0; --i) {
                TCGCallArgumentLoc *loc = &info->in[i];
                switch (loc->kind) {
                case TCG_CALL_ARG_BY_REF:
                case TCG_CALL_ARG_BY_REF_N:
                    loc->ref_slot += ref_base;
                    break;
                default:
                    break;
                }
            }
        }
    }
}
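
/*
 * Worked example (a sketch; the exact placement depends on the
 * backend's TCG_TARGET_CALL_ARG_* settings): laying out
 * helper_ldq_mmu(env, addr, oi, ra) on a 32-bit host that uses
 * TCG_CALL_ARG_EVEN for i64 puts 'env' in slot 0, pads so 'addr'
 * occupies the even slot pair {2,3}, then places 'oi' in slot 4 and
 * 'ra' in slot 5; the 64-bit result needs nr_out = 2 host words.
 */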
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    init_call_layout(&info_helper_ld32_mmu);
    init_call_layout(&info_helper_ld64_mmu);
    init_call_layout(&info_helper_ld128_mmu);
    init_call_layout(&info_helper_st32_mmu);
    init_call_layout(&info_helper_st64_mmu);
    init_call_layout(&info_helper_st128_mmu);

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In system-mode we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    tcg_env = temp_tcgv_ptr(ts);
}
void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
void tcg_prologue_init(void)
{
    TCGContext *s = tcg_ctx;
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue. */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry. */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);
    perf_report_prologue(s->code_gen_ptr, prologue_size);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
            if (s->data_gen_ptr) {
                size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
                size_t data_size = prologue_size - code_size;
                size_t i;

                disas(logfile, s->code_gen_ptr, code_size);

                for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint64_t *)(s->data_gen_ptr + i));
                    } else {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08x\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint32_t *)(s->data_gen_ptr + i));
                    }
                }
            } else {
                disas(logfile, s->code_gen_ptr, prologue_size);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif

    tcg_region_prologue_set(s);
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality. */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);

    tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
                     s->addr_type == TCG_TYPE_I64);

    tcg_debug_assert(s->insn_start_words > 0);
}
static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
static TCGTemp *tcg_global_mem_new_internal(TCGv_ptr base, intptr_t offset,
                                            const char *name, TCGType type)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0;

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers. */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + 4;
        ts2->temp_subindex = 1;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t off, const char *name)
{
    TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_I32);
    return temp_tcgv_i32(ts);
}

TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t off, const char *name)
{
    TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_I64);
    return temp_tcgv_i64(ts);
}

TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t off, const char *name)
{
    TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_PTR);
    return temp_tcgv_ptr(ts);
}
TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int n;

    if (kind == TEMP_EBB) {
        int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);

        if (idx < TCG_MAX_TEMPS) {
            /* There is already an available temp with the right type. */
            clear_bit(idx, s->free_temps[type].l);

            ts = &s->temps[idx];
            ts->temp_allocated = 1;
            tcg_debug_assert(ts->base_type == type);
            tcg_debug_assert(ts->kind == kind);
            return ts;
        }
    } else {
        tcg_debug_assert(kind == TEMP_TB);
    }

    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        n = 1;
        break;
    case TCG_TYPE_I64:
        n = 64 / TCG_TARGET_REG_BITS;
        break;
    case TCG_TYPE_I128:
        n = 128 / TCG_TARGET_REG_BITS;
        break;
    default:
        g_assert_not_reached();
    }

    ts = tcg_temp_alloc(s);
    ts->base_type = type;
    ts->temp_allocated = 1;
    ts->kind = kind;

    if (n == 1) {
        ts->type = type;
    } else {
        ts->type = TCG_TYPE_REG;

        for (int i = 1; i < n; ++i) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + i);
            ts2->base_type = type;
            ts2->type = TCG_TYPE_REG;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = i;
            ts2->kind = kind;
        }
    }
    return ts;
}
TCGv_i32 tcg_temp_new_i32(void)
{
    return temp_tcgv_i32(tcg_temp_new_internal(TCG_TYPE_I32, TEMP_TB));
}

TCGv_i32 tcg_temp_ebb_new_i32(void)
{
    return temp_tcgv_i32(tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB));
}

TCGv_i64 tcg_temp_new_i64(void)
{
    return temp_tcgv_i64(tcg_temp_new_internal(TCG_TYPE_I64, TEMP_TB));
}

TCGv_i64 tcg_temp_ebb_new_i64(void)
{
    return temp_tcgv_i64(tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB));
}

TCGv_ptr tcg_temp_new_ptr(void)
{
    return temp_tcgv_ptr(tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_TB));
}

TCGv_ptr tcg_temp_ebb_new_ptr(void)
{
    return temp_tcgv_ptr(tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB));
}

TCGv_i128 tcg_temp_new_i128(void)
{
    return temp_tcgv_i128(tcg_temp_new_internal(TCG_TYPE_I128, TEMP_TB));
}

TCGv_i128 tcg_temp_ebb_new_i128(void)
{
    return temp_tcgv_i128(tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB));
}
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp. */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
    return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;

    switch (ts->kind) {
    case TEMP_CONST:
    case TEMP_TB:
        /* Silently ignore free. */
        break;
    case TEMP_EBB:
        tcg_debug_assert(ts->temp_allocated != 0);
        ts->temp_allocated = 0;
        set_bit(temp_idx(ts), s->free_temps[ts->base_type].l);
        break;
    default:
        /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */
        g_assert_not_reached();
    }
}
void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(tcgv_i32_temp(arg));
}

void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(tcgv_i64_temp(arg));
}

void tcg_temp_free_i128(TCGv_i128 arg)
{
    tcg_temp_free_internal(tcgv_i128_temp(arg));
}

void tcg_temp_free_ptr(TCGv_ptr arg)
{
    tcg_temp_free_internal(tcgv_ptr_temp(arg));
}

void tcg_temp_free_vec(TCGv_vec arg)
{
    tcg_temp_free_internal(tcgv_vec_temp(arg));
}
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        int64_t *val_ptr;

        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + 1);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;

            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;

            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works.  Actual uses will
             * truncate the value to the low part.
             */
            ts[HOST_BIG_ENDIAN].val = val;
            ts[!HOST_BIG_ENDIAN].val = val >> 32;
            val_ptr = &ts[HOST_BIG_ENDIAN].val;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
            val_ptr = &ts->val;
        }
        g_hash_table_insert(h, val_ptr, ts);
    }

    return ts;
}
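
/*
 * Example: tcg_constant_i64(0x123456789abcdef0) on a 32-bit
 * little-endian host allocates a pair: ts[0].val holds the full value
 * (so the hash lookup works) and ts[1].val holds 0x12345678.  Uses of
 * ts[0] truncate to the low 32 bits; ts[1] (temp_subindex = 1)
 * supplies the high half.
 */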
TCGv_i32 tcg_constant_i32(int32_t val)
{
    return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
}

TCGv_i64 tcg_constant_i64(int64_t val)
{
    return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
}

TCGv_ptr tcg_constant_ptr_int(intptr_t val)
{
    return temp_tcgv_ptr(tcg_constant_internal(TCG_TYPE_PTR, val));
}

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}
#ifdef CONFIG_DEBUG_TCG
size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v - offsetof(TCGContext, temps);

    assert(o < sizeof(TCGTemp) * tcg_ctx->nb_temps);
    assert(o % sizeof(TCGTemp) == 0);

    return (void *)tcg_ctx + (uintptr_t)v;
}
#endif /* CONFIG_DEBUG_TCG */
1946 /* Return true if OP may appear in the opcode stream.
1947 Test the runtime variable that controls each opcode. */
1948 bool tcg_op_supported(TCGOpcode op
)
1951 = TCG_TARGET_HAS_v64
| TCG_TARGET_HAS_v128
| TCG_TARGET_HAS_v256
;
1954 case INDEX_op_discard
:
1955 case INDEX_op_set_label
:
1959 case INDEX_op_insn_start
:
1960 case INDEX_op_exit_tb
:
1961 case INDEX_op_goto_tb
:
1962 case INDEX_op_goto_ptr
:
1963 case INDEX_op_qemu_ld_a32_i32
:
1964 case INDEX_op_qemu_ld_a64_i32
:
1965 case INDEX_op_qemu_st_a32_i32
:
1966 case INDEX_op_qemu_st_a64_i32
:
1967 case INDEX_op_qemu_ld_a32_i64
:
1968 case INDEX_op_qemu_ld_a64_i64
:
1969 case INDEX_op_qemu_st_a32_i64
:
1970 case INDEX_op_qemu_st_a64_i64
:
1973 case INDEX_op_qemu_st8_a32_i32
:
1974 case INDEX_op_qemu_st8_a64_i32
:
1975 return TCG_TARGET_HAS_qemu_st8_i32
;
1977 case INDEX_op_qemu_ld_a32_i128
:
1978 case INDEX_op_qemu_ld_a64_i128
:
1979 case INDEX_op_qemu_st_a32_i128
:
1980 case INDEX_op_qemu_st_a64_i128
:
1981 return TCG_TARGET_HAS_qemu_ldst_i128
;
1983 case INDEX_op_mov_i32
:
1984 case INDEX_op_setcond_i32
:
1985 case INDEX_op_brcond_i32
:
1986 case INDEX_op_movcond_i32
:
1987 case INDEX_op_ld8u_i32
:
1988 case INDEX_op_ld8s_i32
:
1989 case INDEX_op_ld16u_i32
:
1990 case INDEX_op_ld16s_i32
:
1991 case INDEX_op_ld_i32
:
1992 case INDEX_op_st8_i32
:
1993 case INDEX_op_st16_i32
:
1994 case INDEX_op_st_i32
:
1995 case INDEX_op_add_i32
:
1996 case INDEX_op_sub_i32
:
1997 case INDEX_op_neg_i32
:
1998 case INDEX_op_mul_i32
:
1999 case INDEX_op_and_i32
:
2000 case INDEX_op_or_i32
:
2001 case INDEX_op_xor_i32
:
2002 case INDEX_op_shl_i32
:
2003 case INDEX_op_shr_i32
:
2004 case INDEX_op_sar_i32
:
2007 case INDEX_op_negsetcond_i32
:
2008 return TCG_TARGET_HAS_negsetcond_i32
;
2009 case INDEX_op_div_i32
:
2010 case INDEX_op_divu_i32
:
2011 return TCG_TARGET_HAS_div_i32
;
2012 case INDEX_op_rem_i32
:
2013 case INDEX_op_remu_i32
:
2014 return TCG_TARGET_HAS_rem_i32
;
2015 case INDEX_op_div2_i32
:
2016 case INDEX_op_divu2_i32
:
2017 return TCG_TARGET_HAS_div2_i32
;
2018 case INDEX_op_rotl_i32
:
2019 case INDEX_op_rotr_i32
:
2020 return TCG_TARGET_HAS_rot_i32
;
2021 case INDEX_op_deposit_i32
:
2022 return TCG_TARGET_HAS_deposit_i32
;
2023 case INDEX_op_extract_i32
:
2024 return TCG_TARGET_HAS_extract_i32
;
2025 case INDEX_op_sextract_i32
:
2026 return TCG_TARGET_HAS_sextract_i32
;
2027 case INDEX_op_extract2_i32
:
2028 return TCG_TARGET_HAS_extract2_i32
;
2029 case INDEX_op_add2_i32
:
2030 return TCG_TARGET_HAS_add2_i32
;
2031 case INDEX_op_sub2_i32
:
2032 return TCG_TARGET_HAS_sub2_i32
;
2033 case INDEX_op_mulu2_i32
:
2034 return TCG_TARGET_HAS_mulu2_i32
;
2035 case INDEX_op_muls2_i32
:
2036 return TCG_TARGET_HAS_muls2_i32
;
2037 case INDEX_op_muluh_i32
:
2038 return TCG_TARGET_HAS_muluh_i32
;
2039 case INDEX_op_mulsh_i32
:
2040 return TCG_TARGET_HAS_mulsh_i32
;
2041 case INDEX_op_ext8s_i32
:
2042 return TCG_TARGET_HAS_ext8s_i32
;
2043 case INDEX_op_ext16s_i32
:
2044 return TCG_TARGET_HAS_ext16s_i32
;
2045 case INDEX_op_ext8u_i32
:
2046 return TCG_TARGET_HAS_ext8u_i32
;
2047 case INDEX_op_ext16u_i32
:
2048 return TCG_TARGET_HAS_ext16u_i32
;
2049 case INDEX_op_bswap16_i32
:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_movcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_negsetcond_i64:
        return TCG_TARGET_HAS_negsetcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extr_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_nand_vec:
        return have_vec && TCG_TARGET_HAS_nand_vec;
    case INDEX_op_nor_vec:
        return have_vec && TCG_TARGET_HAS_nor_vec;
    case INDEX_op_eqv_vec:
        return have_vec && TCG_TARGET_HAS_eqv_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
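/*
 * Usage sketch (added commentary, not upstream code): a front end can
 * consult tcg_op_supported() to choose between emitting an opcode
 * directly and expanding it, e.g.:
 *
 *     if (tcg_op_supported(INDEX_op_ctpop_i32)) {
 *         // emit INDEX_op_ctpop_i32
 *     } else {
 *         // expand popcount from shifts, masks and adds
 *     }
 */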
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);

static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args)
{
    TCGv_i64 extend_free[MAX_CALL_IARGS];
    int n_extend = 0;
    TCGOp *op;
    int i, n, pi = 0, total_args;

    if (unlikely(g_once_init_enter(HELPER_INFO_INIT(info)))) {
        init_call_layout(info);
        g_once_init_leave(HELPER_INFO_INIT(info), HELPER_INFO_INIT_VAL(info));
    }

    total_args = info->nr_out + info->nr_in + 2;
    op = tcg_op_alloc(INDEX_op_call, total_args);

#ifdef CONFIG_PLUGIN
    /* Flag helpers that may affect guest state */
    if (tcg_ctx->plugin_insn &&
        !(info->flags & TCG_CALL_PLUGIN) &&
        !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
        tcg_ctx->plugin_insn->calls_helpers = true;
    }
#endif

    TCGOP_CALLO(op) = n = info->nr_out;
    switch (n) {
    case 0:
        tcg_debug_assert(ret == NULL);
        break;
    case 1:
        tcg_debug_assert(ret != NULL);
        op->args[pi++] = temp_arg(ret);
        break;
    case 2:
    case 4:
        tcg_debug_assert(ret != NULL);
        tcg_debug_assert(ret->base_type == ret->type + ctz32(n));
        tcg_debug_assert(ret->temp_subindex == 0);
        for (i = 0; i < n; ++i) {
            op->args[pi++] = temp_arg(ret + i);
        }
        break;
    default:
        g_assert_not_reached();
    }

    TCGOP_CALLI(op) = n = info->nr_in;
    for (i = 0; i < n; i++) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
        case TCG_CALL_ARG_BY_REF:
        case TCG_CALL_ARG_BY_REF_N:
            op->args[pi++] = temp_arg(ts);
            break;

        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            {
                TCGv_i64 temp = tcg_temp_ebb_new_i64();
                TCGv_i32 orig = temp_tcgv_i32(ts);

                if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
                    tcg_gen_ext_i32_i64(temp, orig);
                } else {
                    tcg_gen_extu_i32_i64(temp, orig);
                }
                op->args[pi++] = tcgv_i64_arg(temp);
                extend_free[n_extend++] = temp;
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    op->args[pi++] = (uintptr_t)info->func;
    op->args[pi++] = (uintptr_t)info;
    tcg_debug_assert(pi == total_args);

    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);

    tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
    for (i = 0; i < n_extend; ++i) {
        tcg_temp_free_i64(extend_free[i]);
    }
}
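/*
 * Layout sketch (added commentary): for a helper with one 64-bit result
 * and two register inputs on a 64-bit host, the args array built above is
 *
 *     args[0]    temp_arg(ret)
 *     args[1-2]  temp_arg() of each input
 *     args[3]    (uintptr_t)info->func
 *     args[4]    (uintptr_t)info
 *
 * matching total_args == nr_out + nr_in + 2.  32-bit hosts and
 * by-reference arguments get their layout from init_call_layout().
 */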
void tcg_gen_call0(TCGHelperInfo *info, TCGTemp *ret)
{
    tcg_gen_callN(info, ret, NULL);
}

void tcg_gen_call1(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1)
{
    tcg_gen_callN(info, ret, &t1);
}

void tcg_gen_call2(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2)
{
    TCGTemp *args[2] = { t1, t2 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call3(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3)
{
    TCGTemp *args[3] = { t1, t2, t3 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call4(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3, TCGTemp *t4)
{
    TCGTemp *args[4] = { t1, t2, t3, t4 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call5(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3, TCGTemp *t4, TCGTemp *t5)
{
    TCGTemp *args[5] = { t1, t2, t3, t4, t5 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call6(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2,
                   TCGTemp *t3, TCGTemp *t4, TCGTemp *t5, TCGTemp *t6)
{
    TCGTemp *args[6] = { t1, t2, t3, t4, t5, t6 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call7(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3, TCGTemp *t4,
                   TCGTemp *t5, TCGTemp *t6, TCGTemp *t7)
{
    TCGTemp *args[7] = { t1, t2, t3, t4, t5, t6, t7 };
    tcg_gen_callN(info, ret, args);
}
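/*
 * Usage sketch (added commentary, hypothetical helper): a front end with
 * a TCGHelperInfo for a two-argument helper would write
 *
 *     tcg_gen_call2(info, tcgv_i32_temp(ret),
 *                   tcgv_i32_temp(a), tcgv_i32_temp(b));
 *
 * The wrappers only assemble the TCGTemp *args[] array; all layout
 * decisions are made in tcg_gen_callN.
 */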
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;

    for (i = 0, n = s->nb_temps; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        TCGTempVal val = TEMP_VAL_MEM;

        switch (ts->kind) {
        case TEMP_CONST:
            val = TEMP_VAL_CONST;
            break;
        case TEMP_FIXED:
            val = TEMP_VAL_REG;
            break;
        case TEMP_GLOBAL:
            break;
        case TEMP_EBB:
            val = TEMP_VAL_DEAD;
            /* fall through */
        case TEMP_TB:
            ts->mem_allocated = 0;
            break;
        default:
            g_assert_not_reached();
        }
        ts->val_type = val;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    switch (ts->kind) {
    case TEMP_FIXED:
    case TEMP_GLOBAL:
        pstrcpy(buf, buf_size, ts->name);
        break;
    case TEMP_TB:
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
        break;
    case TEMP_EBB:
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
        break;
    case TEMP_CONST:
        switch (ts->type) {
        case TCG_TYPE_I32:
            snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
            break;
#if TCG_TARGET_REG_BITS > 32
        case TCG_TYPE_I64:
            snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
            break;
#endif
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            snprintf(buf, buf_size, "v%d$0x%" PRIx64,
                     64 << (ts->type - TCG_TYPE_V64), ts->val);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    }
    return buf;
}
static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu",
    [TCG_COND_TSTEQ] = "tsteq",
    [TCG_COND_TSTNE] = "tstne",
};
static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEUQ] = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEUQ] = "beq",
    [MO_128 + MO_BE] = "beo",
    [MO_128 + MO_LE] = "leo",
};
static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
static const char * const atom_name[(MO_ATOM_MASK >> MO_ATOM_SHIFT) + 1] = {
    [MO_ATOM_IFALIGN >> MO_ATOM_SHIFT]      = "",
    [MO_ATOM_IFALIGN_PAIR >> MO_ATOM_SHIFT] = "pair+",
    [MO_ATOM_WITHIN16 >> MO_ATOM_SHIFT]     = "w16+",
    [MO_ATOM_WITHIN16_PAIR >> MO_ATOM_SHIFT] = "w16p+",
    [MO_ATOM_SUBALIGN >> MO_ATOM_SHIFT]     = "sub+",
    [MO_ATOM_NONE >> MO_ATOM_SHIFT]         = "noat+",
};
static const char bswap_flag_name[][6] = {
    [TCG_BSWAP_IZ] = "iz",
    [TCG_BSWAP_OZ] = "oz",
    [TCG_BSWAP_OS] = "os",
    [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
    [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
};
static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}
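/*
 * Example (added commentary): for d == 0x10 the set is single and
 * tcg_regset_first() returns 4; for d == 0x18 (regs 3 and 4) the set
 * is not single and the first register is 3.
 */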
/* Return only the number of characters output -- no error return. */
#define ne_fprintf(...) \
    ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
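/*
 * Note (added commentary): this is a GCC/Clang statement expression,
 * so "col += ne_fprintf(f, ...)" accumulates a column count that can
 * never go negative even if fprintf() reports an error.
 */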
static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += ne_fprintf(f, "\n ----");

            for (i = 0, k = s->insn_start_words; i < k; ++i) {
                col += ne_fprintf(f, " %016" PRIx64,
                                  tcg_get_insn_start_param(op, i));
            }
        } else if (c == INDEX_op_call) {
            const TCGHelperInfo *info = tcg_call_info(op);
            void *func = tcg_call_func(op);

            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            col += ne_fprintf(f, " %s ", def->name);

            /*
             * Print the function name from TCGHelperInfo, if available.
             * Note that plugins have a template function for the info,
             * but the actual function pointer comes from the plugin.
             */
            if (func == info->func) {
                col += ne_fprintf(f, "%s", info->name);
            } else {
                col += ne_fprintf(f, "plugin(%p)", func);
            }

            col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                            op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                col += ne_fprintf(f, ",%s", t);
            }
        } else {
            col += ne_fprintf(f, " %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
                                  8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                const char *sep = k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                const char *sep = k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_negsetcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_negsetcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
            case INDEX_op_cmpsel_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
                } else {
                    col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_a32_i32:
            case INDEX_op_qemu_ld_a64_i32:
            case INDEX_op_qemu_st_a32_i32:
            case INDEX_op_qemu_st_a64_i32:
            case INDEX_op_qemu_st8_a32_i32:
            case INDEX_op_qemu_st8_a64_i32:
            case INDEX_op_qemu_ld_a32_i64:
            case INDEX_op_qemu_ld_a64_i64:
            case INDEX_op_qemu_st_a32_i64:
            case INDEX_op_qemu_st_a64_i64:
            case INDEX_op_qemu_ld_a32_i128:
            case INDEX_op_qemu_ld_a64_i128:
            case INDEX_op_qemu_st_a32_i128:
            case INDEX_op_qemu_st_a64_i128:
                {
                    const char *s_al, *s_op, *s_at;
                    MemOpIdx oi = op->args[k++];
                    MemOp mop = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
                    s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
                    s_at = atom_name[(mop & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
                    mop &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);

                    /* If all fields are accounted for, print symbolically. */
                    if (!mop && s_al && s_op && s_at) {
                        col += ne_fprintf(f, ",%s%s%s,%u",
                                          s_at, s_al, s_op, ix);
                    } else {
                        mop = get_memop(oi);
                        col += ne_fprintf(f, ",$0x%x,%u", mop, ix);
                    }
                    i = 1;
                }
                break;
            case INDEX_op_bswap16_i32:
            case INDEX_op_bswap16_i64:
            case INDEX_op_bswap32_i32:
            case INDEX_op_bswap32_i64:
            case INDEX_op_bswap64_i64:
                {
                    TCGArg flags = op->args[k];
                    const char *name = NULL;

                    if (flags < ARRAY_SIZE(bswap_flag_name)) {
                        name = bswap_flag_name[flags];
                    }
                    if (name) {
                        col += ne_fprintf(f, ",%s", name);
                    } else {
                        col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
                    }
                    i = k = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += ne_fprintf(f, "%s$L%d", k ? "," : "",
                                  arg_label(op->args[k])->id);
                i++, k++;
                break;
            case INDEX_op_mb:
                {
                    TCGBar membar = op->args[k];
                    const char *b_op, *m_op;

                    switch (membar & TCG_BAR_SC) {
                    case 0:
                        b_op = "none";
                        break;
                    case TCG_BAR_LDAQ:
                        b_op = "acq";
                        break;
                    case TCG_BAR_STRL:
                        b_op = "rel";
                        break;
                    case TCG_BAR_SC:
                        b_op = "seq";
                        break;
                    default:
                        g_assert_not_reached();
                    }

                    switch (membar & TCG_MO_ALL) {
                    case 0:
                        m_op = "none";
                        break;
                    case TCG_MO_LD_LD:
                        m_op = "rr";
                        break;
                    case TCG_MO_LD_ST:
                        m_op = "rw";
                        break;
                    case TCG_MO_ST_LD:
                        m_op = "wr";
                        break;
                    case TCG_MO_ST_ST:
                        m_op = "ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST:
                        m_op = "rr+rw";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_LD:
                        m_op = "rr+wr";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_ST:
                        m_op = "rr+ww";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_LD:
                        m_op = "rw+wr";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_ST:
                        m_op = "rw+ww";
                        break;
                    case TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "wr+ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD:
                        m_op = "rr+rw+wr";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST:
                        m_op = "rr+rw+ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "rr+wr+ww";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "rw+wr+ww";
                        break;
                    case TCG_MO_ALL:
                        m_op = "all";
                        break;
                    default:
                        g_assert_not_reached();
                    }

                    col += ne_fprintf(f, "%s%s:%s", (k ? "," : ""), b_op, m_op);
                    i++, k++;
                }
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
                                  op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            for (; col < 40; ++col) {
                putc(' ', f);
            }
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                ne_fprintf(f, "  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                ne_fprintf(f, "  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = output_pref(op, i);

                if (i == 0) {
                    ne_fprintf(f, "  pref=");
                } else {
                    ne_fprintf(f, ",");
                }
                if (set == 0) {
                    ne_fprintf(f, "none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    ne_fprintf(f, "all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    ne_fprintf(f, "0x%x", (uint32_t)set);
                } else {
                    ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
                }
            }
        }

        putc('\n', f);
    }
}
/* we give more priority to constraints with less registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct = &def->args_ct[k];
    int n = ctpop64(arg_ct->regs);

    /*
     * Sort constraints of a single register first, which includes output
     * aliases (which must exactly match the input already allocated).
     */
    if (n == 1 || arg_ct->oalias) {
        return INT_MAX;
    }

    /*
     * Sort register pairs next, first then second immediately after.
     * Arbitrarily sort multiple pairs by the index of the first reg;
     * there shouldn't be many pairs.
     */
    switch (arg_ct->pair) {
    case 1:
    case 3:
        return (k + 1) * 2;
    case 2:
        return (arg_ct->pair_index + 1) * 2 - 1;
    }

    /* Finally, sort by decreasing register count. */
    assert(n > 1);
    return -n;
}
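/*
 * Worked example (added commentary): an output alias ("0" constraint) or
 * a single-register class returns INT_MAX and sorts first.  A pair whose
 * first half sits at index a returns (a + 1) * 2 and its second half
 * (a + 1) * 2 - 1, so the two sort adjacently, first half first.  Plain
 * classes return -n, so a 2-register class sorts before an 8-register one.
 */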
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j;
    TCGArgConstraint *a = def->args_ct;

    for (i = 0; i < n; i++) {
        a[start + i].sort_index = start + i;
    }
    if (n <= 1) {
        return;
    }
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            int p1 = get_constraint_priority(def, a[start + i].sort_index);
            int p2 = get_constraint_priority(def, a[start + j].sort_index);
            if (p1 < p2) {
                int tmp = a[start + i].sort_index;
                a[start + i].sort_index = a[start + j].sort_index;
                a[start + j].sort_index = tmp;
            }
        }
    }
}
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        bool saw_alias_pair = false;
        int i, o, i2, o2, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        /*
         * Macro magic should make it impossible, but double-check that
         * the array index is in range.  Since the signness of an enum
         * is implementation defined, force the result to unsigned.
         */
        unsigned con_set = tcg_target_op_def(op);
        tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
        tdefs = &constraint_sets[con_set];

        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            bool input_p = i >= def->nb_oargs;

            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            switch (*ct_str) {
            case '0' ... '9':
                o = *ct_str - '0';
                tcg_debug_assert(input_p);
                tcg_debug_assert(o < def->nb_oargs);
                tcg_debug_assert(def->args_ct[o].regs != 0);
                tcg_debug_assert(!def->args_ct[o].oalias);
                def->args_ct[i] = def->args_ct[o];
                /* The output sets oalias. */
                def->args_ct[o].oalias = 1;
                def->args_ct[o].alias_index = i;
                /* The input sets ialias. */
                def->args_ct[i].ialias = 1;
                def->args_ct[i].alias_index = o;
                if (def->args_ct[i].pair) {
                    saw_alias_pair = true;
                }
                tcg_debug_assert(ct_str[1] == '\0');
                continue;

            case '&':
                tcg_debug_assert(!input_p);
                def->args_ct[i].newreg = true;
                ct_str++;
                break;

            case 'p': /* plus */
                /* Allocate to the register after the previous. */
                tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
                o = i - 1;
                tcg_debug_assert(!def->args_ct[o].pair);
                tcg_debug_assert(!def->args_ct[o].ct);
                def->args_ct[i] = (TCGArgConstraint){
                    .pair = 2,
                    .pair_index = o,
                    .regs = def->args_ct[o].regs << 1,
                    .newreg = def->args_ct[o].newreg,
                };
                def->args_ct[o].pair = 1;
                def->args_ct[o].pair_index = i;
                tcg_debug_assert(ct_str[1] == '\0');
                continue;

            case 'm': /* minus */
                /* Allocate to the register before the previous. */
                tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
                o = i - 1;
                tcg_debug_assert(!def->args_ct[o].pair);
                tcg_debug_assert(!def->args_ct[o].ct);
                def->args_ct[i] = (TCGArgConstraint){
                    .pair = 1,
                    .pair_index = o,
                    .regs = def->args_ct[o].regs >> 1,
                    .newreg = def->args_ct[o].newreg,
                };
                def->args_ct[o].pair = 2;
                def->args_ct[o].pair_index = i;
                tcg_debug_assert(ct_str[1] == '\0');
                continue;
            }

            do {
                switch (*ct_str) {
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    break;

                /* Include all of the target-specific constraints. */

#undef CONST
#define CONST(CASE, MASK) \
    case CASE: def->args_ct[i].ct |= MASK; break;
#define REGS(CASE, MASK) \
    case CASE: def->args_ct[i].regs |= MASK; break;

#include "tcg-target-con-str.h"

#undef REGS
#undef CONST
                default:
                case '0' ... '9':
                case '&':
                case 'p':
                case 'm':
                    /* Typo in TCGTargetOpDef constraint. */
                    g_assert_not_reached();
                }
            } while (*++ct_str != '\0');
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /*
         * Fix up output pairs that are aliased with inputs.
         * When we created the alias, we copied pair from the output.
         * There are three cases:
         *    (1a) Pairs of inputs alias pairs of outputs.
         *    (1b) One input aliases the first of a pair of outputs.
         *    (2)  One input aliases the second of a pair of outputs.
         *
         * Case 1a is handled by making sure that the pair_index'es are
         * properly updated so that they appear the same as a pair of inputs.
         *
         * Case 1b is handled by setting the pair_index of the input to
         * itself, simply so it doesn't point to an unrelated argument.
         * Since we don't encounter the "second" during the input allocation
         * phase, nothing happens with the second half of the input pair.
         *
         * Case 2 is handled by setting the second input to pair=3, the
         * first output to pair=3, and the pair_index'es to match.
         */
        if (saw_alias_pair) {
            for (i = def->nb_oargs; i < nb_args; i++) {
                /*
                 * Since [0-9pm] must be alone in the constraint string,
                 * the only way they can both be set is if the pair comes
                 * from the output alias.
                 */
                if (!def->args_ct[i].ialias) {
                    continue;
                }
                switch (def->args_ct[i].pair) {
                case 0:
                    break;
                case 1:
                    o = def->args_ct[i].alias_index;
                    o2 = def->args_ct[o].pair_index;
                    tcg_debug_assert(def->args_ct[o].pair == 1);
                    tcg_debug_assert(def->args_ct[o2].pair == 2);
                    if (def->args_ct[o2].oalias) {
                        /* Case 1a */
                        i2 = def->args_ct[o2].alias_index;
                        tcg_debug_assert(def->args_ct[i2].pair == 2);
                        def->args_ct[i2].pair_index = i;
                        def->args_ct[i].pair_index = i2;
                    } else {
                        /* Case 1b */
                        def->args_ct[i].pair_index = i;
                    }
                    break;
                case 2:
                    o = def->args_ct[i].alias_index;
                    o2 = def->args_ct[o].pair_index;
                    tcg_debug_assert(def->args_ct[o].pair == 2);
                    tcg_debug_assert(def->args_ct[o2].pair == 1);
                    if (def->args_ct[o2].oalias) {
                        /* Case 1a */
                        i2 = def->args_ct[o2].alias_index;
                        tcg_debug_assert(def->args_ct[i2].pair == 1);
                        def->args_ct[i2].pair_index = i;
                        def->args_ct[i].pair_index = i2;
                    } else {
                        /* Case 2 */
                        def->args_ct[i].pair = 3;
                        def->args_ct[o2].pair = 3;
                        def->args_ct[i].pair_index = o2;
                        def->args_ct[o2].pair_index = i;
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }
        }

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
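/*
 * Decoding sketch (added commentary): a target constraint set such as
 * { "r", "0", "re" } -- e.g. what i386 supplies for add_i32 -- becomes:
 * output 0 allocatable in any GP register; input 1 aliased to output 0
 * (oalias/ialias/alias_index set on both sides); input 2 accepting a
 * register or, via the target letter 'e' from tcg-target-con-str.h,
 * a constant.  The exact letters are target-specific.
 */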
static void remove_label_use(TCGOp *op, int idx)
{
    TCGLabel *label = arg_label(op->args[idx]);
    TCGLabelUse *use;

    QSIMPLEQ_FOREACH(use, &label->branches, next) {
        if (use->op == op) {
            QSIMPLEQ_REMOVE(&label->branches, use, TCGLabelUse, next);
            return;
        }
    }
    g_assert_not_reached();
}
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    switch (op->opc) {
    case INDEX_op_br:
        remove_label_use(op, 0);
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        remove_label_use(op, 3);
        break;
    case INDEX_op_brcond2_i32:
        remove_label_use(op, 5);
        break;
    default:
        break;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;
}
void tcg_remove_ops_after(TCGOp *op)
{
    TCGContext *s = tcg_ctx;

    while (true) {
        TCGOp *last = tcg_last_op();
        if (last == op) {
            return;
        }
        tcg_op_remove(s, last);
    }
}
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op = NULL;

    if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
        QTAILQ_FOREACH(op, &s->free_ops, link) {
            if (nargs <= op->nargs) {
                QTAILQ_REMOVE(&s->free_ops, op, link);
                nargs = op->nargs;
                goto found;
            }
        }
    }

    /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
    nargs = MAX(4, nargs);
    op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);

 found:
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    op->nargs = nargs;

    /* Check for bitfield overflow. */
    tcg_debug_assert(op->nargs == nargs);

    s->nb_ops++;
    return op;
}
TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
{
    TCGOp *op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, unsigned nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, unsigned nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
static void move_label_uses(TCGLabel *to, TCGLabel *from)
{
    TCGLabelUse *u;

    QSIMPLEQ_FOREACH(u, &from->branches, next) {
        TCGOp *op = u->op;
        switch (op->opc) {
        case INDEX_op_br:
            op->args[0] = label_arg(to);
            break;
        case INDEX_op_brcond_i32:
        case INDEX_op_brcond_i64:
            op->args[3] = label_arg(to);
            break;
        case INDEX_op_brcond2_i32:
            op->args[5] = label_arg(to);
            break;
        default:
            g_assert_not_reached();
        }
    }

    QSIMPLEQ_CONCAT(&to->branches, &from->branches);
}
/* Reachable analysis: remove unreachable code. */
static void __attribute__((noinline))
reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next, *op_prev;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);

            /*
             * Note that the first op in the TB is always a load,
             * so there is always something before a label.
             */
            op_prev = QTAILQ_PREV(op, link);

            /*
             * If we find two sequential labels, move all branches to
             * reference the second label and remove the first label.
             * Do this before branch to next optimization, so that the
             * middle label is out of the way.
             */
            if (op_prev->opc == INDEX_op_set_label) {
                move_label_uses(label, arg_label(op_prev->args[0]));
                tcg_op_remove(s, op_prev);
                op_prev = QTAILQ_PREV(op, link);
            }

            /*
             * Optimization can fold conditional branches to unconditional.
             * If we find a label which is preceded by an unconditional
             * branch to next, remove the branch.  We couldn't do this when
             * processing the branch because any dead code between the branch
             * and label had not yet been removed.
             */
            if (op_prev->opc == INDEX_op_br &&
                label == arg_label(op_prev->args[0])) {
                tcg_op_remove(s, op_prev);
                /* Fall through means insns become live again. */
                dead = false;
            }

            if (QSIMPLEQ_EMPTY(&label->branches)) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that will be, and there is
                 * little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again. */
                dead = false;
                remove = false;
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead. */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions. */
            if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind. */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}
#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

/* For liveness_pass_1, the register preferences for a given temp. */
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
{
    return ts->state_ptr;
}
/* For liveness_pass_1, reset the preferences for a given temp to the
 * maximal regset for its type.
 */
static inline void la_reset_pref(TCGTemp *ts)
{
    *la_temp_pref(ts)
        = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
}
/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void la_func_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void la_bb_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_FIXED:
        case TEMP_GLOBAL:
        case TEMP_TB:
            state = TS_DEAD | TS_MEM;
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            state = TS_DEAD;
            break;
        default:
            g_assert_not_reached();
        }
        ts->state = state;
        la_reset_pref(ts);
    }
}
/* liveness analysis: sync globals back to memory. */
static void la_global_sync(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; ++i) {
        int state = s->temps[i].state;
        s->temps[i].state = state | TS_MEM;
        if (state == TS_DEAD) {
            /* If the global was previously dead, reset prefs. */
            la_reset_pref(&s->temps[i]);
        }
    }
}
/*
 * liveness analysis: conditional branch: all temps are dead unless
 * explicitly live-across-conditional-branch, globals and local temps
 * should be synced.
 */
static void la_bb_sync(TCGContext *s, int ng, int nt)
{
    la_global_sync(s, ng);

    for (int i = ng; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_TB:
            state = ts->state;
            ts->state = state | TS_MEM;
            if (state != TS_DEAD) {
                continue;
            }
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            continue;
        default:
            g_assert_not_reached();
        }
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: sync globals back to memory and kill. */
static void la_global_kill(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; i++) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: note live globals crossing calls. */
static void la_cross_call(TCGContext *s, int nt)
{
    TCGRegSet mask = ~tcg_target_call_clobber_regs;
    int i;

    for (i = 0; i < nt; i++) {
        TCGTemp *ts = &s->temps[i];
        if (!(ts->state & TS_DEAD)) {
            TCGRegSet *pset = la_temp_pref(ts);
            TCGRegSet set = *pset;

            set &= mask;
            /* If the combination is not possible, restart. */
            if (set == 0) {
                set = tcg_target_available_regs[ts->type] & mask;
            }
            *pset = set;
        }
    }
}
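/*
 * Example (added commentary): on an x86_64 host the call-clobbered set
 * includes rax/rcx/rdx/rsi/rdi/r8-r11, so a temp live across a call has
 * its preference narrowed to call-saved registers (rbx, r12-r15),
 * avoiding a spill and reload around the call.
 */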
/*
 * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
 * to TEMP_EBB, if possible.
 */
static void __attribute__((noinline))
liveness_pass_0(TCGContext *s)
{
    void * const multiple_ebb = (void *)(uintptr_t)-1;
    int nb_temps = s->nb_temps;
    TCGOp *op, *ebb;

    for (int i = s->nb_globals; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    /*
     * Represent each EBB by the op at which it begins.  In the case of
     * the first EBB, this is the first op, otherwise it is a label.
     * Collect the uses of each TEMP_TB: NULL for unused, EBB for use
     * within a single EBB, else MULTIPLE_EBB.
     */
    ebb = QTAILQ_FIRST(&s->ops);
    QTAILQ_FOREACH(op, &s->ops, link) {
        const TCGOpDef *def;
        int nb_oargs, nb_iargs;

        switch (op->opc) {
        case INDEX_op_set_label:
            ebb = op;
            continue;
        case INDEX_op_discard:
            continue;
        case INDEX_op_call:
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            break;
        default:
            def = &tcg_op_defs[op->opc];
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            break;
        }

        for (int i = 0; i < nb_oargs + nb_iargs; ++i) {
            TCGTemp *ts = arg_temp(op->args[i]);

            if (ts->kind != TEMP_TB) {
                continue;
            }
            if (ts->state_ptr == NULL) {
                ts->state_ptr = ebb;
            } else if (ts->state_ptr != ebb) {
                ts->state_ptr = multiple_ebb;
            }
        }
    }

    /*
     * For TEMP_TB that turned out not to be used beyond one EBB,
     * reduce the liveness to TEMP_EBB.
     */
    for (int i = s->nb_globals; i < nb_temps; ++i) {
        TCGTemp *ts = &s->temps[i];
        if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) {
            ts->kind = TEMP_EBB;
        }
    }
}
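/*
 * Example (added commentary): a TEMP_TB temp that is written and read
 * only between one label and the next is demoted to TEMP_EBB here,
 * allowing liveness_pass_1 to kill it at the EBB boundary instead of
 * syncing it to its stack slot.
 */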
/* Liveness analysis: update the opc_arg_life array to tell if a
   given input argument is dead.  Instructions updating dead
   temporaries are removed. */
static void __attribute__((noinline))
liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB. */
    la_func_end(s, nb_globals, nb_temps);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                const TCGHelperInfo *info = tcg_call_info(op);
                int call_flags = tcg_call_flags(op);

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead. */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);
                }

                /* Not used -- it will be tcg_target_call_oarg_reg(). */
                memset(op->output_pref, 0, sizeof(op->output_pref));

                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper. */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs. */
                la_cross_call(s, nb_temps);

                /*
                 * Input arguments are live for preceding opcodes.
                 *
                 * For those arguments that die, and will be allocated in
                 * registers, clear the register set for that arg, to be
                 * filled in below.  For args that will be on the stack,
                 * reset to any available reg.  Process arguments in reverse
                 * order so that if a temp is used more than once, the stack
                 * reset to max happens before the register reset to 0.
                 */
                for (i = nb_iargs - 1; i >= 0; i--) {
                    const TCGCallArgumentLoc *loc = &info->in[i];
                    ts = arg_temp(op->args[nb_oargs + i]);

                    if (ts->state & TS_DEAD) {
                        switch (loc->kind) {
                        case TCG_CALL_ARG_NORMAL:
                        case TCG_CALL_ARG_EXTEND_U:
                        case TCG_CALL_ARG_EXTEND_S:
                            if (arg_slot_reg_p(loc->arg_slot)) {
                                *la_temp_pref(ts) = 0;
                                break;
                            }
                            /* fall through */
                        default:
                            *la_temp_pref(ts) =
                                tcg_target_available_regs[ts->type];
                            break;
                        }
                        ts->state &= ~TS_DEAD;
                    }
                }

                /*
                 * For each input argument, add its input register to prefs.
                 * If a temp is used once, this produces a single set bit;
                 * if a temp is used multiple times, this produces a set.
                 */
                for (i = 0; i < nb_iargs; i++) {
                    const TCGCallArgumentLoc *loc = &info->in[i];
                    ts = arg_temp(op->args[nb_oargs + i]);

                    switch (loc->kind) {
                    case TCG_CALL_ARG_NORMAL:
                    case TCG_CALL_ARG_EXTEND_U:
                    case TCG_CALL_ARG_EXTEND_S:
                        if (arg_slot_reg_p(loc->arg_slot)) {
                            tcg_regset_set_reg(*la_temp_pref(ts),
                                tcg_target_call_iarg_regs[loc->arg_slot]);
                        }
                        break;
                    default:
                        break;
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit. */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live. */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead. */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live. */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects. */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed. */
                if (i < ARRAY_SIZE(op->output_pref)) {
                    op->output_pref[i] = *la_temp_pref(ts);
                }

                /* Output args are dead. */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update. */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_COND_BRANCH) {
                la_bb_sync(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode. */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes. */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type. */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand. */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward. */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->regs;
                    if (ct->ialias) {
                        set &= output_pref(op, ct->alias_index);
                    }
                    /* If the combination is not possible, restart. */
                    if (set == 0) {
                        set = ct->regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}
/* Liveness analysis: Convert indirect regs to direct temporaries. */
static bool __attribute__((noinline))
liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global. */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            dts->temp_subindex = its->temp_subindex;
            dts->kind = TEMP_EBB;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead. */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = tcg_call_flags(op);
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require. */
            if (def->flags & TCG_OPF_COND_BRANCH) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals. */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts && arg_ts->state == TS_DEAD) {
                TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_ld_i32
                                  : INDEX_op_ld_i64);
                TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);

                lop->args[0] = temp_arg(dir_ts);
                lop->args[1] = temp_arg(arg_ts->mem_base);
                lop->args[2] = arg_ts->mem_offset;

                /* Loaded, but synced with memory. */
                arg_ts->state = TS_MEM;
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[i] = temp_arg(dir_ts);
                changes = true;
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points. */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM. */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded. */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available. */
        if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
            arg_ts = arg_temp(op->args[0]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[0] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified. */
                arg_ts->state = 0;

                if (NEED_SYNC_ARG(0)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
                    TCGTemp *out_ts = dir_ts;

                    if (IS_DEAD_ARG(0)) {
                        out_ts = arg_temp(op->args[1]);
                        arg_ts->state = TS_DEAD;
                        tcg_op_remove(s, op);
                    } else {
                        arg_ts->state = TS_MEM;
                    }

                    sop->args[0] = temp_arg(out_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;
                } else {
                    tcg_debug_assert(!IS_DEAD_ARG(0));
                }
            }
        } else {
            for (i = 0; i < nb_oargs; i++) {
                arg_ts = arg_temp(op->args[i]);
                dir_ts = arg_ts->state_ptr;
                if (!dir_ts) {
                    continue;
                }
                op->args[i] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified. */
                arg_ts->state = 0;

                /* Sync outputs upon their last write. */
                if (NEED_SYNC_ARG(i)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);

                    sop->args[0] = temp_arg(dir_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;

                    arg_ts->state = TS_MEM;
                }
                /* Drop outputs that are dead. */
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }
    }

    return changes;
}
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
    intptr_t off;
    int size, align;

    /* When allocating an object, look at the full type. */
    size = tcg_type_size(ts->base_type);
    switch (ts->base_type) {
    case TCG_TYPE_I32:
        align = 4;
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        align = 8;
        break;
    case TCG_TYPE_I128:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /*
         * Note that we do not require aligned storage for V256,
         * and that we provide alignment for I128 to match V128,
         * even if that's above what the host ABI requires.
         */
        align = 16;
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Assume the stack is sufficiently aligned.
     * This affects e.g. ARM NEON, where we have 8 byte stack alignment
     * and do not require 16 byte vector alignment.  This seems slightly
     * easier than fully parameterizing the above switch statement.
     */
    align = MIN(TCG_TARGET_STACK_ALIGN, align);
    off = ROUND_UP(s->current_frame_offset, align);

    /* If we've exhausted the stack frame, restart with a smaller TB. */
    if (off + size > s->frame_end) {
        tcg_raise_tb_overflow(s);
    }
    s->current_frame_offset = off + size;
#if defined(__sparc__)
    off += TCG_TARGET_STACK_BIAS;
#endif

    /* If the object was subdivided, assign memory to all the parts. */
    if (ts->base_type != ts->type) {
        int part_size = tcg_type_size(ts->type);
        int part_count = size / part_size;

        /*
         * Each part is allocated sequentially in tcg_temp_new_internal.
         * Jump back to the first part by subtracting the current index.
         */
        ts -= ts->temp_subindex;
        for (int i = 0; i < part_count; ++i) {
            ts[i].mem_offset = off + i * part_size;
            ts[i].mem_base = s->frame_temp;
            ts[i].mem_allocated = 1;
        }
    } else {
        ts->mem_offset = off;
        ts->mem_base = s->frame_temp;
        ts->mem_allocated = 1;
    }
}
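/*
 * Layout sketch (added commentary): a TCG_TYPE_I128 temp on a 64-bit
 * host (size 16, align 16) is subdivided into two TCG_TYPE_I64 parts;
 * the loop above assigns part 0 mem_offset off and part 1 off + 8,
 * both based on s->frame_temp.
 */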
/* Assign @reg to @ts, and update reg_to_temp[]. */
static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
{
    if (ts->val_type == TEMP_VAL_REG) {
        TCGReg old = ts->reg;
        tcg_debug_assert(s->reg_to_temp[old] == ts);
        if (old == reg) {
            return;
        }
        s->reg_to_temp[old] = NULL;
    }
    tcg_debug_assert(s->reg_to_temp[reg] == NULL);
    s->reg_to_temp[reg] = ts;
    ts->val_type = TEMP_VAL_REG;
    ts->reg = reg;
}
/* Assign a non-register value type to @ts, and update reg_to_temp[]. */
static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
{
    tcg_debug_assert(type != TEMP_VAL_REG);
    if (ts->val_type == TEMP_VAL_REG) {
        TCGReg reg = ts->reg;
        tcg_debug_assert(s->reg_to_temp[reg] == ts);
        s->reg_to_temp[reg] = NULL;
    }
    ts->val_type = type;
}
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead. */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    TCGTempVal new_type;

    switch (ts->kind) {
    case TEMP_FIXED:
        return;
    case TEMP_GLOBAL:
    case TEMP_TB:
        new_type = TEMP_VAL_MEM;
        break;
    case TEMP_EBB:
        new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
        break;
    case TEMP_CONST:
        new_type = TEMP_VAL_CONST;
        break;
    default:
        g_assert_not_reached();
    }
    set_temp_val_nonreg(s, ts, new_type);
}
/* Mark a temporary as dead. */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
/* Sync a temporary to memory.  'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free. */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    if (!temp_readonly(ts) && !ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly. */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fall through */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            g_assert_not_reached();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, 0, -1);
    }
}
/**
 * tcg_reg_alloc:
 * @required_regs: Set of registers in which we must allocate.
 * @allocated_regs: Set of registers which must be avoided.
 * @preferred_regs: Set of registers we should prefer.
 * @rev: True if we search the registers in "indirect" order.
 *
 * The allocated register must be in @required_regs & ~@allocated_regs,
 * but if we can put it in @preferred_regs we may save a move later.
 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
                            TCGRegSet allocated_regs,
                            TCGRegSet preferred_regs, bool rev)
{
    int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    reg_ct[1] = required_regs & ~allocated_regs;
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    /* Skip the preferred_regs option if it cannot be satisfied,
       or if the preference made no difference. */
    f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* Try free registers, preferences first. */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set. */
            TCGReg reg = tcg_regset_first(set);
            if (s->reg_to_temp[reg] == NULL) {
                return reg;
            }
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (s->reg_to_temp[reg] == NULL &&
                    tcg_regset_test_reg(set, reg)) {
                    return reg;
                }
            }
        }
    }

    /* We must spill something. */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set. */
            TCGReg reg = tcg_regset_first(set);
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (tcg_regset_test_reg(set, reg)) {
                    tcg_reg_free(s, reg, allocated_regs);
                    return reg;
                }
            }
        }
    }

    g_assert_not_reached();
}
static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
                                 TCGRegSet allocated_regs,
                                 TCGRegSet preferred_regs, bool rev)
{
    int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    /* Ensure that if I is not in allocated_regs, I+1 is not either. */
    reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /*
     * Skip the preferred_regs option if it cannot be satisfied,
     * or if the preference made no difference.
     */
    k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    /*
     * Minimize the number of flushes by looking for 2 free registers first,
     * then a single flush, then two flushes.
     */
    for (fmin = 2; fmin >= 0; fmin--) {
        for (j = k; j < 2; j++) {
            TCGRegSet set = reg_ct[j];

            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];

                if (tcg_regset_test_reg(set, reg)) {
                    int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
                    if (f >= fmin) {
                        tcg_reg_free(s, reg, allocated_regs);
                        tcg_reg_free(s, reg + 1, allocated_regs);
                        return reg;
                    }
                }
            }
        }
    }
    g_assert_not_reached();
}
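/*
 * Search sketch (added commentary): with fmin == 2 only pairs with both
 * reg and reg + 1 unused qualify; fmin == 1 tolerates one occupied half
 * (one spill); fmin == 0 spills both halves.  Returning on the first
 * qualifying pair makes the cheapest option win.
 */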
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED. */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        if (ts->type <= TCG_TYPE_I64) {
            tcg_out_movi(s, ts->type, reg, ts->val);
        } else {
            uint64_t val = ts->val;
            MemOp vece = MO_64;

            /*
             * Find the minimal vector element that matches the constant.
             * The targets will, in general, have to do this search anyway,
             * do this generically.
             */
            if (val == dup_const(MO_8, val)) {
                vece = MO_8;
            } else if (val == dup_const(MO_16, val)) {
                vece = MO_16;
            } else if (val == dup_const(MO_32, val)) {
                vece = MO_32;
            }

            tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
        }
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        g_assert_not_reached();
    }
    set_temp_val_reg(s, ts, reg);
}
/* Save a temporary to memory.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory.  Keep a tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
}
/* save globals to their canonical location and assume they can be
   modified by the following code.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}
/* sync globals to their canonical location and assume they can be
   read by the following code.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->kind == TEMP_FIXED
                         || ts->mem_coherent);
    }
}
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];

        switch (ts->kind) {
        case TEMP_TB:
            temp_save(s, ts, allocated_regs);
            break;
        case TEMP_EBB:
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            /* Similarly, we should have freed any allocated register. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
            break;
        default:
            g_assert_not_reached();
        }
    }

    save_globals(s, allocated_regs);
}
/*
 * At a conditional branch, we assume all temporaries are dead unless
 * explicitly live-across-conditional-branch; all globals and local
 * temps are synced to their location.
 */
static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
{
    sync_globals(s, allocated_regs);

    for (int i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        /*
         * The liveness analysis already ensures that temps are dead.
         * Keep tcg_debug_asserts for safety.
         */
        switch (ts->kind) {
        case TEMP_TB:
            tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
            break;
        case TEMP_EBB:
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            break;
        default:
            g_assert_not_reached();
        }
    }
}
/*
 * Specialized code generation for INDEX_op_mov_* with a constant.
 */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* The movi is not explicitly generated here. */
    set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
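
/*
 * Example: for "mov_i32 t0, $0x1234" no host instruction is emitted
 * here; t0 merely becomes TEMP_VAL_CONST.  The movi is materialized
 * later, by temp_load() or temp_sync(), and only if t0 is actually
 * used in a register or must live in memory.
 */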
/*
 * Specialized code generation for INDEX_op_mov_*.
 */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs, preferred_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;
    TCGReg oreg, ireg;

    allocated_regs = s->reserved_regs;
    preferred_regs = output_pref(op, 0);
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* Note that otype != itype for no-op truncation. */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy. Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype],
                  allocated_regs, preferred_regs);
    }
    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    ireg = ts->reg;

    if (IS_DEAD_ARG(0)) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
        return;
    }

    if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
        /*
         * The mov can be suppressed. Kill input first, so that it
         * is unlinked from reg_to_temp, then set the output to the
         * reg that we saved from the input.
         */
        temp_dead(s, ts);
        oreg = ireg;
    } else {
        if (ots->val_type == TEMP_VAL_REG) {
            oreg = ots->reg;
        } else {
            /* Make sure to not spill the input register during allocation. */
            oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                 allocated_regs | ((TCGRegSet)1 << ireg),
                                 preferred_regs, ots->indirect_base);
        }
        if (!tcg_out_mov(s, otype, oreg, ireg)) {
            /*
             * Cross register class move not supported.
             * Store the source register into the destination slot
             * and leave the destination temp as TEMP_VAL_MEM.
             */
            assert(!temp_readonly(ots));
            if (!ts->mem_allocated) {
                temp_allocate_frame(s, ots);
            }
            tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
            set_temp_val_nonreg(s, ts, TEMP_VAL_MEM);
            ots->mem_coherent = 1;
            return;
        }
    }
    set_temp_val_reg(s, ots, oreg);
    ots->mem_coherent = 0;

    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, allocated_regs, 0, 0);
    }
}
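
/*
 * Example: for "mov_i32 t1, t0" where t0 already sits in a register and
 * dies at this opcode, no host move is emitted at all; the input is
 * killed and its register is simply relabeled as holding t1.
 */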
/*
 * Specialized code generation for INDEX_op_dup_vec.
 */
static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet dup_out_regs, dup_in_regs;
    TCGTemp *its, *ots;
    TCGType itype, vtype;
    MemOp vece;
    int lowpart_ofs;
    bool ok;

    ots = arg_temp(op->args[0]);
    its = arg_temp(op->args[1]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    itype = its->type;
    vece = TCGOP_VECE(op);
    vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    if (its->val_type == TEMP_VAL_CONST) {
        /* Propagate constant via movi -> dupi. */
        tcg_target_ulong val = its->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, its);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
        return;
    }

    dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
    dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;

    /* Allocate the output register now. */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGReg oreg;

        if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
            /* Make sure to not spill the input register. */
            tcg_regset_set_reg(allocated_regs, its->reg);
        }
        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    switch (its->val_type) {
    case TEMP_VAL_REG:
        /*
         * The dup constraints must be broad, covering all possible VECE.
         * However, tcg_op_dup_vec() gets to see the VECE and we allow it
         * to fail, indicating that extra moves are required for that case.
         */
        if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
            if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
                goto done;
            }
            /* Try again from memory or a vector input register. */
        }
        if (!its->mem_coherent) {
            /*
             * The input register is not synced, and so an extra store
             * would be required to use memory. Attempt an integer-vector
             * register move first. We do not have a TCGRegSet for this.
             */
            if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
                break;
            }
            /* Sync the temp back to its slot and load from there. */
            temp_sync(s, its, s->reserved_regs, 0, 0);
        }
        /* fall through */

    case TEMP_VAL_MEM:
        lowpart_ofs = 0;
        if (HOST_BIG_ENDIAN) {
            lowpart_ofs = tcg_type_size(itype) - (1 << vece);
        }
        if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
                             its->mem_offset + lowpart_ofs)) {
            goto done;
        }
        /* Load the input into the destination vector register. */
        tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
        break;

    default:
        g_assert_not_reached();
    }

    /* We now have a vector input register, so dup must succeed. */
    ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
    tcg_debug_assert(ok);

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, its);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, 0);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
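
/*
 * Example of the lowpart_ofs computation in tcg_reg_alloc_dup: on a
 * big-endian host with itype == TCG_TYPE_I64 and vece == MO_8,
 * lowpart_ofs is 8 - 1 = 7, the offset of the least significant byte
 * within the stored word, which is the element tcg_out_dupm_vec()
 * must broadcast.
 */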
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];
    TCGCond op_cond;

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    switch (op->opc) {
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        op_cond = op->args[2];
        break;
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_negsetcond_i32:
    case INDEX_op_negsetcond_i64:
    case INDEX_op_cmp_vec:
        op_cond = op->args[3];
        break;
    case INDEX_op_brcond2_i32:
        op_cond = op->args[4];
        break;
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
    case INDEX_op_cmpsel_vec:
        op_cond = op->args[5];
        break;
    default:
        /* No condition within opcode. */
        op_cond = TCG_COND_ALWAYS;
        break;
    }

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, i_required_regs;
        bool allocate_new_reg, copyto_new_reg;
        TCGTemp *ts2;
        int i1, i2;

        i = def->args_ct[nb_oargs + k].sort_index;
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
                                      op_cond, TCGOP_VECE(op))) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        reg = ts->reg;
        i_preferred_regs = 0;
        i_required_regs = arg_ct->regs;
        allocate_new_reg = false;
        copyto_new_reg = false;

        switch (arg_ct->pair) {
        case 0: /* not paired */
            if (arg_ct->ialias) {
                i_preferred_regs = output_pref(op, arg_ct->alias_index);

                /*
                 * If the input is readonly, then it cannot also be an
                 * output and aliased to itself. If the input is not
                 * dead after the instruction, we must allocate a new
                 * register and move it.
                 */
                if (temp_readonly(ts) || !IS_DEAD_ARG(i)
                    || def->args_ct[arg_ct->alias_index].newreg) {
                    allocate_new_reg = true;
                } else if (ts->val_type == TEMP_VAL_REG) {
                    /*
                     * Check if the current register has already been
                     * allocated for another input.
                     */
                    allocate_new_reg =
                        tcg_regset_test_reg(i_allocated_regs, reg);
                }
            }
            if (!allocate_new_reg) {
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                reg = ts->reg;
                allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
            }
            if (allocate_new_reg) {
                /*
                 * Allocate a new register matching the constraint
                 * and move the temporary register into it.
                 */
                temp_load(s, ts, tcg_target_available_regs[ts->type],
                          i_allocated_regs, 0);
                reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
                                    i_preferred_regs, ts->indirect_base);
                copyto_new_reg = true;
            }
            break;

        case 1:
            /* First of an input pair; if i1 == i2, the second is an output. */
            i1 = i;
            i2 = arg_ct->pair_index;
            ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;

            /*
             * It is easier to default to allocating a new pair
             * and to identify a few cases where it's not required.
             */
            if (arg_ct->ialias) {
                i_preferred_regs = output_pref(op, arg_ct->alias_index);
                if (IS_DEAD_ARG(i1) &&
                    IS_DEAD_ARG(i2) &&
                    !temp_readonly(ts) &&
                    ts->val_type == TEMP_VAL_REG &&
                    ts->reg < TCG_TARGET_NB_REGS - 1 &&
                    tcg_regset_test_reg(i_required_regs, reg) &&
                    !tcg_regset_test_reg(i_allocated_regs, reg) &&
                    !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
                    (ts2
                     ? ts2->val_type == TEMP_VAL_REG &&
                       ts2->reg == reg + 1 &&
                       !temp_readonly(ts2)
                     : s->reg_to_temp[reg + 1] == NULL)) {
                    break;
                }
            } else {
                /* Without aliasing, the pair must also be an input. */
                tcg_debug_assert(ts2);
                if (ts->val_type == TEMP_VAL_REG &&
                    ts2->val_type == TEMP_VAL_REG &&
                    ts2->reg == reg + 1 &&
                    tcg_regset_test_reg(i_required_regs, reg)) {
                    break;
                }
            }
            reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
                                     0, ts->indirect_base);
            goto do_pair;

        case 2: /* pair second */
            reg = new_args[arg_ct->pair_index] + 1;
            goto do_pair;

        case 3: /* ialias with second output, no first input */
            tcg_debug_assert(arg_ct->ialias);
            i_preferred_regs = output_pref(op, arg_ct->alias_index);

            if (IS_DEAD_ARG(i) &&
                !temp_readonly(ts) &&
                ts->val_type == TEMP_VAL_REG &&
                reg > 0 &&
                s->reg_to_temp[reg - 1] == NULL &&
                tcg_regset_test_reg(i_required_regs, reg) &&
                !tcg_regset_test_reg(i_allocated_regs, reg) &&
                !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
                tcg_regset_set_reg(i_allocated_regs, reg - 1);
                break;
            }
            reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
                                     i_allocated_regs, 0,
                                     ts->indirect_base);
            tcg_regset_set_reg(i_allocated_regs, reg);
            reg += 1;
            goto do_pair;

        do_pair:
            /*
             * If an aliased input is not dead after the instruction,
             * we must allocate a new register and move it.
             */
            if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
                TCGRegSet t_allocated_regs = i_allocated_regs;

                /*
                 * Because of the alias, and the continued life, make sure
                 * that the temp is somewhere *other* than the reg pair,
                 * and we get a copy in reg.
                 */
                tcg_regset_set_reg(t_allocated_regs, reg);
                tcg_regset_set_reg(t_allocated_regs, reg + 1);
                if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
                    /* If ts was already in reg, copy it somewhere else. */
                    TCGReg nr;
                    bool ok;

                    tcg_debug_assert(ts->kind != TEMP_FIXED);
                    nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                       t_allocated_regs, 0, ts->indirect_base);
                    ok = tcg_out_mov(s, ts->type, nr, reg);
                    tcg_debug_assert(ok);

                    set_temp_val_reg(s, ts, nr);
                } else {
                    temp_load(s, ts, tcg_target_available_regs[ts->type],
                              t_allocated_regs, 0);
                    copyto_new_reg = true;
                }
            } else {
                /* Preferably allocate to reg, otherwise copy. */
                i_required_regs = (TCGRegSet)1 << reg;
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                copyto_new_reg = ts->reg != reg;
            }
            break;

        default:
            g_assert_not_reached();
        }

        if (copyto_new_reg) {
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported. Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_COND_BRANCH) {
        tcg_reg_alloc_cbranch(s, i_allocated_regs);
    } else if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for (k = 0; k < nb_oargs; k++) {
            i = def->args_ct[k].sort_index;
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified. */
            tcg_debug_assert(!temp_readonly(ts));

            switch (arg_ct->pair) {
            case 0: /* not paired */
                if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
                    reg = new_args[arg_ct->alias_index];
                } else if (arg_ct->newreg) {
                    reg = tcg_reg_alloc(s, arg_ct->regs,
                                        i_allocated_regs | o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                } else {
                    reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                }
                break;

            case 1: /* first of pair */
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                } else if (arg_ct->newreg) {
                    reg = tcg_reg_alloc_pair(s, arg_ct->regs,
                                             i_allocated_regs | o_allocated_regs,
                                             output_pref(op, k),
                                             ts->indirect_base);
                } else {
                    reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
                                             output_pref(op, k),
                                             ts->indirect_base);
                }
                break;

            case 2: /* second of pair */
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                } else {
                    reg = new_args[arg_ct->pair_index] + 1;
                }
                break;

            case 3: /* first of pair, aliasing with a second input */
                tcg_debug_assert(!arg_ct->newreg);
                reg = new_args[arg_ct->pair_index] - 1;
                break;

            default:
                g_assert_not_reached();
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    switch (op->opc) {
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_ext32u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext_i32_i64:
        tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
        break;
    default:
        if (def->flags & TCG_OPF_VECTOR) {
            tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                           new_args, const_args);
        } else {
            tcg_out_op(s, op->opc, new_args, const_args);
        }
        break;
    }

    /* move the outputs in the correct register if needed */
    for (i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified. */
        tcg_debug_assert(!temp_readonly(ts));

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
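
/*
 * A rough summary of the pair encoding consumed above: arg_ct->pair is
 * 0 for an unpaired argument, 1 for the low half of a register pair,
 * 2 for the high half (implicitly new_args[pair_index] + 1), and 3 for
 * a half that aliases the second output.  Thus a 64-bit input on a
 * 32-bit host needs no move when its halves already occupy consecutive
 * registers, e.g. ts in r4 and ts2 in r5 == r4 + 1.
 */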
/*
 * Specialized code generation for INDEX_op_dup2_vec.
 */
static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGTemp *ots, *itsl, *itsh;
    TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
    tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
    tcg_debug_assert(TCGOP_VECE(op) == MO_64);

    ots = arg_temp(op->args[0]);
    itsl = arg_temp(op->args[1]);
    itsh = arg_temp(op->args[2]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* Allocate the output register now. */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGRegSet dup_out_regs =
            tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
        TCGReg oreg;

        /* Make sure to not spill the input registers. */
        if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsl->reg);
        }
        if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsh->reg);
        }

        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    /* Promote dup2 of immediates to dupi_vec. */
    if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
        uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
        MemOp vece = MO_64;

        if (val == dup_const(MO_8, val)) {
            vece = MO_8;
        } else if (val == dup_const(MO_16, val)) {
            vece = MO_16;
        } else if (val == dup_const(MO_32, val)) {
            vece = MO_32;
        }

        tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
        goto done;
    }

    /* If the two inputs form one 64-bit value, try dupm_vec. */
    if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
        itsh->temp_subindex == !HOST_BIG_ENDIAN &&
        itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
        TCGTemp *its = itsl - HOST_BIG_ENDIAN;

        temp_sync(s, its + 0, s->reserved_regs, 0, 0);
        temp_sync(s, its + 1, s->reserved_regs, 0, 0);

        if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
                             its->mem_base->reg, its->mem_offset)) {
            goto done;
        }
    }

    /* Fall back to generic expansion. */
    return false;

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, itsl);
    }
    if (IS_DEAD_ARG(2)) {
        temp_dead(s, itsh);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
    return true;
}
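
/*
 * Example: deposit64(lo, 32, 32, hi) forms (hi << 32) | (uint32_t)lo,
 * so dup2 of the constant pair lo = hi = 0x12345678 yields
 * 0x1234567812345678, which matches dup_const(MO_32, val) and is
 * promoted to a single dupi_vec with vece = MO_32.
 */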
static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    if (ts->val_type == TEMP_VAL_REG) {
        if (ts->reg != reg) {
            tcg_reg_free(s, reg, allocated_regs);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported. Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
    } else {
        TCGRegSet arg_set = 0;

        tcg_reg_free(s, reg, allocated_regs);
        tcg_regset_set_reg(arg_set, reg);
        temp_load(s, ts, arg_set, allocated_regs, 0);
    }
}
static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    /*
     * When the destination is on the stack, load up the temp and store.
     * If there are many call-saved registers, the temp might live to
     * see another use; otherwise it'll be discarded.
     */
    temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
    tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
               arg_slot_stk_ofs(arg_slot));
}
static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
                            TCGTemp *ts, TCGRegSet *allocated_regs)
{
    if (arg_slot_reg_p(l->arg_slot)) {
        TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
        load_arg_reg(s, reg, ts, *allocated_regs);
        tcg_regset_set_reg(*allocated_regs, reg);
    } else {
        load_arg_stk(s, l->arg_slot, ts, *allocated_regs);
    }
}
static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base,
                         intptr_t ref_off, TCGRegSet *allocated_regs)
{
    TCGReg reg;

    if (arg_slot_reg_p(arg_slot)) {
        reg = tcg_target_call_iarg_regs[arg_slot];
        tcg_reg_free(s, reg, *allocated_regs);
        tcg_out_addi_ptr(s, reg, ref_base, ref_off);
        tcg_regset_set_reg(*allocated_regs, reg);
    } else {
        reg = tcg_reg_alloc(s, tcg_target_available_regs[TCG_TYPE_PTR],
                            *allocated_regs, 0, false);
        tcg_out_addi_ptr(s, reg, ref_base, ref_off);
        tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
                   arg_slot_stk_ofs(arg_slot));
    }
}
static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    const TCGHelperInfo *info = tcg_call_info(op);
    TCGRegSet allocated_regs = s->reserved_regs;
    int i;

    /*
     * Move inputs into place in reverse order,
     * so that we place stacked arguments first.
     */
    for (i = nb_iargs - 1; i >= 0; --i) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            load_arg_normal(s, loc, ts, &allocated_regs);
            break;
        case TCG_CALL_ARG_BY_REF:
            load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
            load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
                         arg_slot_stk_ofs(loc->ref_slot),
                         &allocated_regs);
            break;
        case TCG_CALL_ARG_BY_REF_N:
            load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Mark dead temporaries and free the associated registers. */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* Clobber call registers. */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /*
     * Save globals if they might be written by the helper,
     * sync them if they might be read.
     */
    if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    /*
     * If the ABI passes a pointer to the returned struct as the first
     * argument, load that now. Pass a pointer to the output home slot.
     */
    if (info->out_kind == TCG_CALL_RET_BY_REF) {
        TCGTemp *ts = arg_temp(op->args[0]);

        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        load_arg_ref(s, 0, ts->mem_base->reg, ts->mem_offset, &allocated_regs);
    }

    tcg_out_call(s, tcg_call_func(op), info);

    /* Assign output registers and emit moves if needed. */
    switch (info->out_kind) {
    case TCG_CALL_RET_NORMAL:
        for (i = 0; i < nb_oargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            TCGReg reg = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, i);

            /* ENV should not be modified. */
            tcg_debug_assert(!temp_readonly(ts));

            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
        }
        break;

    case TCG_CALL_RET_BY_VEC:
        {
            TCGTemp *ts = arg_temp(op->args[0]);

            tcg_debug_assert(ts->base_type == TCG_TYPE_I128);
            tcg_debug_assert(ts->temp_subindex == 0);
            if (!ts->mem_allocated) {
                temp_allocate_frame(s, ts);
            }
            tcg_out_st(s, TCG_TYPE_V128,
                       tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
                       ts->mem_base->reg, ts->mem_offset);
        }
        /* fall through to mark all parts in memory */

    case TCG_CALL_RET_BY_REF:
        /* The callee has performed a write through the reference. */
        for (i = 0; i < nb_oargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            ts->val_type = TEMP_VAL_MEM;
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Flush or discard output registers as needed. */
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
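
/*
 * Note on the reverse iteration over inputs above: stacked arguments
 * are placed first, presumably so that any scratch registers borrowed
 * while building the stack slots are released again before the fixed
 * argument registers are pinned by load_arg_normal().
 */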
/**
 * atom_and_align_for_opc:
 * @s: tcg context
 * @opc: memory operation code
 * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations
 * @allow_two_ops: true if we are prepared to issue two operations
 *
 * Return the alignment and atomicity to use for the inline fast path
 * for the given memory operation. The alignment may be larger than
 * that specified in @opc, and the correct alignment will be diagnosed
 * by the slow path helper.
 *
 * If @allow_two_ops, the host is prepared to test for 2x alignment,
 * and issue two loads or stores for subalignment.
 */
static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
{
    MemOp align = get_alignment_bits(opc);
    MemOp size = opc & MO_SIZE;
    MemOp half = size ? size - 1 : 0;
    MemOp atom = opc & MO_ATOM_MASK;
    MemOp atmax;

    switch (atom) {
    case MO_ATOM_NONE:
        /* The operation requires no specific atomicity. */
        atmax = MO_8;
        break;

    case MO_ATOM_IFALIGN:
        atmax = size;
        break;

    case MO_ATOM_IFALIGN_PAIR:
        atmax = half;
        break;

    case MO_ATOM_WITHIN16:
        atmax = size;
        if (size == MO_128) {
            /* Misalignment implies !within16, and therefore no atomicity. */
        } else if (host_atom != MO_ATOM_WITHIN16) {
            /* The host does not implement within16, so require alignment. */
            align = MAX(align, size);
        }
        break;

    case MO_ATOM_WITHIN16_PAIR:
        atmax = size;
        /*
         * Misalignment implies !within16, and therefore half atomicity.
         * Any host prepared for two operations can implement this with
         * half alignment.
         */
        if (host_atom != MO_ATOM_WITHIN16 && allow_two_ops) {
            align = MAX(align, half);
        }
        break;

    case MO_ATOM_SUBALIGN:
        atmax = size;
        if (host_atom != MO_ATOM_SUBALIGN) {
            /* If unaligned but not odd, there are subobjects up to half. */
            if (allow_two_ops) {
                align = MAX(align, half);
            } else {
                align = MAX(align, size);
            }
        }
        break;

    default:
        g_assert_not_reached();
    }

    return (TCGAtomAlign){ .atom = atmax, .align = align };
}
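
/*
 * Example: for opc == MO_64 | MO_ATOM_WITHIN16 on a host whose accesses
 * are only atomic when aligned (host_atom == MO_ATOM_IFALIGN), the
 * WITHIN16 case above raises align to MO_64, so the inline fast path
 * only handles aligned accesses and misalignment takes the slow path.
 * With size == MO_128 the alignment is left alone: a misaligned
 * 16-byte access cannot sit within a 16-byte line, so no atomicity
 * is owed in that case.
 */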
/*
 * Similarly for qemu_ld/st slow path helpers.
 * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously,
 * using only the provided backend tcg_out_* functions.
 */

static int tcg_out_helper_stk_ofs(TCGType type, unsigned slot)
{
    int ofs = arg_slot_stk_ofs(slot);

    /*
     * Each stack slot is TCG_TARGET_LONG_BITS. If the host does not
     * require extension to uint64_t, adjust the address for uint32_t.
     */
    if (HOST_BIG_ENDIAN &&
        TCG_TARGET_REG_BITS == 64 &&
        type == TCG_TYPE_I32) {
        ofs += 4;
    }
    return ofs;
}
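
/*
 * Example: on a 64-bit big-endian host, an I32 argument stored into a
 * 64-bit stack slot occupies the high-addressed half of the slot, so
 * the adjustment above adds 4 to point at the datum itself.
 */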
static void tcg_out_helper_load_slots(TCGContext *s,
                                      unsigned nmov, TCGMovExtend *mov,
                                      const TCGLdstHelperParam *parm)
{
    unsigned i;
    TCGReg dst3;

    /*
     * Start from the end, storing to the stack first.
     * This frees those registers, so we need not consider overlap.
     */
    for (i = nmov; i-- > 0; ) {
        unsigned slot = mov[i].dst;

        if (arg_slot_reg_p(slot)) {
            goto found_reg;
        }

        TCGReg src = mov[i].src;
        TCGType dst_type = mov[i].dst_type;
        MemOp dst_mo = dst_type == TCG_TYPE_I32 ? MO_32 : MO_64;

        /* The argument is going onto the stack; extend into scratch. */
        if ((mov[i].src_ext & MO_SIZE) != dst_mo) {
            tcg_debug_assert(parm->ntmp != 0);
            mov[i].dst = src = parm->tmp[0];
            tcg_out_movext1(s, &mov[i]);
        }

        tcg_out_st(s, dst_type, src, TCG_REG_CALL_STACK,
                   tcg_out_helper_stk_ofs(dst_type, slot));
    }
    return;

 found_reg:
    /*
     * The remaining arguments are in registers.
     * Convert slot numbers to argument registers.
     */
    nmov = i + 1;
    for (i = 0; i < nmov; ++i) {
        mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst];
    }

    switch (nmov) {
    case 4:
        /* The backend must have provided enough temps for the worst case. */
        tcg_debug_assert(parm->ntmp >= 2);

        dst3 = mov[3].dst;
        for (unsigned j = 0; j < 3; ++j) {
            if (dst3 == mov[j].src) {
                /*
                 * Conflict. Copy the source to a temporary, perform the
                 * remaining moves, then the extension from our scratch
                 * on the way out.
                 */
                TCGReg scratch = parm->tmp[1];

                tcg_out_mov(s, mov[3].src_type, scratch, mov[3].src);
                tcg_out_movext3(s, mov, mov + 1, mov + 2, parm->tmp[0]);
                tcg_out_movext1_new_src(s, &mov[3], scratch);
                return;
            }
        }

        /* No conflicts: perform this move and continue. */
        tcg_out_movext1(s, &mov[3]);
        /* fall through */

    case 3:
        tcg_out_movext3(s, mov, mov + 1, mov + 2,
                        parm->ntmp ? parm->tmp[0] : -1);
        break;
    case 2:
        tcg_out_movext2(s, mov, mov + 1,
                        parm->ntmp ? parm->tmp[0] : -1);
        break;
    case 1:
        tcg_out_movext1(s, mov);
        break;
    default:
        g_assert_not_reached();
    }
}
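
/*
 * Example of the four-move conflict handling above: if mov[3].dst is
 * also mov[1].src, extending into mov[3].dst first would clobber an
 * input still needed by mov[1].  The source is therefore parked in a
 * scratch register, the first three moves are performed, and the final
 * extension is redone from the scratch copy.
 */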
static void tcg_out_helper_load_imm(TCGContext *s, unsigned slot,
                                    TCGType type, tcg_target_long imm,
                                    const TCGLdstHelperParam *parm)
{
    if (arg_slot_reg_p(slot)) {
        tcg_out_movi(s, type, tcg_target_call_iarg_regs[slot], imm);
    } else {
        int ofs = tcg_out_helper_stk_ofs(type, slot);
        if (!tcg_out_sti(s, type, imm, TCG_REG_CALL_STACK, ofs)) {
            tcg_debug_assert(parm->ntmp != 0);
            tcg_out_movi(s, type, parm->tmp[0], imm);
            tcg_out_st(s, type, parm->tmp[0], TCG_REG_CALL_STACK, ofs);
        }
    }
}
static void tcg_out_helper_load_common_args(TCGContext *s,
                                            const TCGLabelQemuLdst *ldst,
                                            const TCGLdstHelperParam *parm,
                                            const TCGHelperInfo *info,
                                            unsigned next_arg)
{
    TCGMovExtend ptr_mov = {
        .dst_type = TCG_TYPE_PTR,
        .src_type = TCG_TYPE_PTR,
        .src_ext = sizeof(void *) == 4 ? MO_32 : MO_64
    };
    const TCGCallArgumentLoc *loc = &info->in[0];
    TCGType type;
    unsigned slot;
    tcg_target_ulong imm;

    /*
     * Handle env, which is always first.
     */
    ptr_mov.dst = loc->arg_slot;
    ptr_mov.src = TCG_AREG0;
    tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);

    /*
     * Handle oi.
     */
    imm = ldst->oi;
    loc = &info->in[next_arg];
    type = TCG_TYPE_I32;
    switch (loc->kind) {
    case TCG_CALL_ARG_NORMAL:
        break;
    case TCG_CALL_ARG_EXTEND_U:
    case TCG_CALL_ARG_EXTEND_S:
        /* No extension required for MemOpIdx. */
        tcg_debug_assert(imm <= INT32_MAX);
        type = TCG_TYPE_REG;
        break;
    default:
        g_assert_not_reached();
    }
    tcg_out_helper_load_imm(s, loc->arg_slot, type, imm, parm);
    next_arg++;

    /*
     * Handle ra.
     */
    loc = &info->in[next_arg];
    slot = loc->arg_slot;
    if (parm->ra_gen) {
        int arg_reg = -1;
        TCGReg ra_reg;

        if (arg_slot_reg_p(slot)) {
            arg_reg = tcg_target_call_iarg_regs[slot];
        }
        ra_reg = parm->ra_gen(s, ldst, arg_reg);

        ptr_mov.dst = slot;
        ptr_mov.src = ra_reg;
        tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);
    } else {
        imm = (uintptr_t)ldst->raddr;
        tcg_out_helper_load_imm(s, slot, TCG_TYPE_PTR, imm, parm);
    }
}
static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov,
                                       const TCGCallArgumentLoc *loc,
                                       TCGType dst_type, TCGType src_type,
                                       TCGReg lo, TCGReg hi)
{
    MemOp reg_mo;

    if (dst_type <= TCG_TYPE_REG) {
        MemOp src_ext;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
            src_ext = src_type == TCG_TYPE_I32 ? MO_32 : MO_64;
            break;
        case TCG_CALL_ARG_EXTEND_U:
            dst_type = TCG_TYPE_REG;
            src_ext = MO_UL;
            break;
        case TCG_CALL_ARG_EXTEND_S:
            dst_type = TCG_TYPE_REG;
            src_ext = MO_SL;
            break;
        default:
            g_assert_not_reached();
        }

        mov[0].dst = loc->arg_slot;
        mov[0].dst_type = dst_type;
        mov[0].src = lo;
        mov[0].src_type = src_type;
        mov[0].src_ext = src_ext;
        return 1;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        assert(dst_type == TCG_TYPE_I64);
        reg_mo = MO_32;
    } else {
        assert(dst_type == TCG_TYPE_I128);
        reg_mo = MO_64;
    }

    mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot;
    mov[0].src = lo;
    mov[0].dst_type = TCG_TYPE_REG;
    mov[0].src_type = TCG_TYPE_REG;
    mov[0].src_ext = reg_mo;

    mov[1].dst = loc[!HOST_BIG_ENDIAN].arg_slot;
    mov[1].src = hi;
    mov[1].dst_type = TCG_TYPE_REG;
    mov[1].src_type = TCG_TYPE_REG;
    mov[1].src_ext = reg_mo;

    return 2;
}
static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                   const TCGLdstHelperParam *parm)
{
    const TCGHelperInfo *info;
    const TCGCallArgumentLoc *loc;
    TCGMovExtend mov[2];
    unsigned next_arg, nmov;
    MemOp mop = get_memop(ldst->oi);

    switch (mop & MO_SIZE) {
    case MO_8:
    case MO_16:
    case MO_32:
        info = &info_helper_ld32_mmu;
        break;
    case MO_64:
        info = &info_helper_ld64_mmu;
        break;
    case MO_128:
        info = &info_helper_ld128_mmu;
        break;
    default:
        g_assert_not_reached();
    }

    /* Defer env argument. */
    next_arg = 1;

    loc = &info->in[next_arg];
    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
        /*
         * 32-bit host with 32-bit guest: zero-extend the guest address
         * to 64-bits for the helper by storing the low part, then
         * load a zero for the high part.
         */
        tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
                               TCG_TYPE_I32, TCG_TYPE_I32,
                               ldst->addrlo_reg, -1);
        tcg_out_helper_load_slots(s, 1, mov, parm);

        tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot,
                                TCG_TYPE_I32, 0, parm);
        next_arg += 2;
    } else {
        nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
                                      ldst->addrlo_reg, ldst->addrhi_reg);
        tcg_out_helper_load_slots(s, nmov, mov, parm);
        next_arg += nmov;
    }

    switch (info->out_kind) {
    case TCG_CALL_RET_NORMAL:
    case TCG_CALL_RET_BY_VEC:
        break;
    case TCG_CALL_RET_BY_REF:
        /*
         * The return reference is in the first argument slot.
         * We need memory in which to return: re-use the top of stack.
         */
        {
            int ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;

            if (arg_slot_reg_p(0)) {
                tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[0],
                                 TCG_REG_CALL_STACK, ofs_slot0);
            } else {
                tcg_debug_assert(parm->ntmp != 0);
                tcg_out_addi_ptr(s, parm->tmp[0],
                                 TCG_REG_CALL_STACK, ofs_slot0);
                tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
                           TCG_REG_CALL_STACK, ofs_slot0);
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}
static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                  bool load_sign,
                                  const TCGLdstHelperParam *parm)
{
    MemOp mop = get_memop(ldst->oi);
    TCGMovExtend mov[2];
    int ofs_slot0;

    switch (ldst->type) {
    case TCG_TYPE_I64:
        if (TCG_TARGET_REG_BITS == 32) {
            break;
        }
        /* fall through */

    case TCG_TYPE_I32:
        mov[0].dst = ldst->datalo_reg;
        mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0);
        mov[0].dst_type = ldst->type;
        mov[0].src_type = TCG_TYPE_REG;

        /*
         * If load_sign, then we allowed the helper to perform the
         * appropriate sign extension to tcg_target_ulong, and all
         * we need now is a plain move.
         *
         * If they do not, then we expect the relevant extension
         * instruction to be no more expensive than a move, and
         * we thus save the icache etc by only using one of two
         * helper functions.
         */
        if (load_sign || !(mop & MO_SIGN)) {
            if (TCG_TARGET_REG_BITS == 32 || ldst->type == TCG_TYPE_I32) {
                mov[0].src_ext = MO_32;
            } else {
                mov[0].src_ext = MO_64;
            }
        } else {
            mov[0].src_ext = mop & MO_SSIZE;
        }
        tcg_out_movext1(s, mov);
        return;

    case TCG_TYPE_I128:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            break;
        case TCG_CALL_RET_BY_VEC:
            tcg_out_st(s, TCG_TYPE_V128,
                       tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
                       TCG_REG_CALL_STACK, ofs_slot0);
            /* fall through */
        case TCG_CALL_RET_BY_REF:
            tcg_out_ld(s, TCG_TYPE_I64, ldst->datalo_reg,
                       TCG_REG_CALL_STACK, ofs_slot0 + 8 * HOST_BIG_ENDIAN);
            tcg_out_ld(s, TCG_TYPE_I64, ldst->datahi_reg,
                       TCG_REG_CALL_STACK, ofs_slot0 + 8 * !HOST_BIG_ENDIAN);
            return;
        default:
            g_assert_not_reached();
        }
        break;

    default:
        g_assert_not_reached();
    }

    mov[0].dst = ldst->datalo_reg;
    mov[0].src =
        tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN);
    mov[0].dst_type = TCG_TYPE_REG;
    mov[0].src_type = TCG_TYPE_REG;
    mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;

    mov[1].dst = ldst->datahi_reg;
    mov[1].src =
        tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN);
    mov[1].dst_type = TCG_TYPE_REG;
    mov[1].src_type = TCG_TYPE_REG;
    mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;

    tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1);
}
static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                   const TCGLdstHelperParam *parm)
{
    const TCGHelperInfo *info;
    const TCGCallArgumentLoc *loc;
    TCGMovExtend mov[4];
    TCGType data_type;
    unsigned next_arg, nmov, n;
    MemOp mop = get_memop(ldst->oi);

    switch (mop & MO_SIZE) {
    case MO_8:
    case MO_16:
    case MO_32:
        info = &info_helper_st32_mmu;
        data_type = TCG_TYPE_I32;
        break;
    case MO_64:
        info = &info_helper_st64_mmu;
        data_type = TCG_TYPE_I64;
        break;
    case MO_128:
        info = &info_helper_st128_mmu;
        data_type = TCG_TYPE_I128;
        break;
    default:
        g_assert_not_reached();
    }

    /* Defer env argument. */
    next_arg = 1;

    /* Handle addr argument. */
    loc = &info->in[next_arg];
    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
        /*
         * 32-bit host with 32-bit guest: zero-extend the guest address
         * to 64-bits for the helper by storing the low part. Later,
         * after we have processed the register inputs, we will load a
         * zero for the high part.
         */
        tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
                               TCG_TYPE_I32, TCG_TYPE_I32,
                               ldst->addrlo_reg, -1);
        next_arg += 2;
        nmov = 1;
    } else {
        n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
                                   ldst->addrlo_reg, ldst->addrhi_reg);
        next_arg += n;
        nmov = n;
    }

    /* Handle data argument. */
    loc = &info->in[next_arg];
    switch (loc->kind) {
    case TCG_CALL_ARG_NORMAL:
    case TCG_CALL_ARG_EXTEND_U:
    case TCG_CALL_ARG_EXTEND_S:
        n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type,
                                   ldst->datalo_reg, ldst->datahi_reg);
        next_arg += n;
        nmov += n;
        tcg_out_helper_load_slots(s, nmov, mov, parm);
        break;

    case TCG_CALL_ARG_BY_REF:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        tcg_debug_assert(data_type == TCG_TYPE_I128);
        tcg_out_st(s, TCG_TYPE_I64,
                   HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg,
                   TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[0].ref_slot));
        tcg_out_st(s, TCG_TYPE_I64,
                   HOST_BIG_ENDIAN ? ldst->datalo_reg : ldst->datahi_reg,
                   TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[1].ref_slot));

        tcg_out_helper_load_slots(s, nmov, mov, parm);

        if (arg_slot_reg_p(loc->arg_slot)) {
            tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[loc->arg_slot],
                             TCG_REG_CALL_STACK,
                             arg_slot_stk_ofs(loc->ref_slot));
        } else {
            tcg_debug_assert(parm->ntmp != 0);
            tcg_out_addi_ptr(s, parm->tmp[0], TCG_REG_CALL_STACK,
                             arg_slot_stk_ofs(loc->ref_slot));
            tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
                       TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc->arg_slot));
        }
        next_arg += 2;
        break;

    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
        /* Zero extend the address by loading a zero for the high part. */
        loc = &info->in[1 + !HOST_BIG_ENDIAN];
        tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm);
    }

    tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}
int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
{
    int i, start_words, num_insns;
    TCGOp *op;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(pc_start))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP:\n");
            tcg_dump_ops(s, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted. */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && !QSIMPLEQ_EMPTY(&l->branches)) {
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
                error = true;
            }
        }
        assert(!error);
    }
#endif

    tcg_optimize(s);

    reachable_code_pass(s);
    liveness_pass_0(s);
    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(pc_start))) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                fprintf(logfile, "OP before indirect lowering:\n");
                tcg_dump_ops(s, logfile, false);
                fprintf(logfile, "\n");
                qemu_log_unlock(logfile);
            }
        }

        /* Replace indirect temps with direct temps. */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness. */
            liveness_pass_1(s);
        }
    }

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(pc_start))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP after optimization and liveness analysis:\n");
            tcg_dump_ops(s, logfile, true);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /* Initialize goto_tb jump offsets. */
    tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
    tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
    tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;

    tcg_reg_alloc_start(s);

    /*
     * Reset the buffer pointers when restarting after overflow.
     * TODO: Move this into translate-all.c with the rest of the
     * buffer management. Having only this done here is confusing.
     */
    s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
    s->code_ptr = s->code_buf;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    start_words = s->insn_start_words;
    s->gen_insn_data =
        tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);

    tcg_out_tb_start(s);

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_dup_vec:
            tcg_reg_alloc_dup(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset. */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < start_words; ++i) {
                s->gen_insn_data[num_insns * start_words + i] =
                    tcg_get_insn_start_param(op, i);
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]));
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        case INDEX_op_exit_tb:
            tcg_out_exit_tb(s, op->args[0]);
            break;
        case INDEX_op_goto_tb:
            tcg_out_goto_tb(s, op->args[0]);
            break;
        case INDEX_op_dup2_vec:
            if (tcg_reg_alloc_dup2(s, op)) {
                break;
            }
            /* fall through */
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
        /* Test for (pending) buffer overflow. The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely. Thus we can test for overflow after
           generating code without having to check during generation. */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off. */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns + 1 == s->gen_tb->icount);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

#ifndef CONFIG_TCG_INTERPRETER
    /* flush instruction cache */
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf,
                        tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
#endif

    return tcg_current_code_size(s);
}
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit. This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */

typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface. */
static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
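
/*
 * Example: with the section string table used below, which begins
 * "\0.text\0.debug_info\0...", find_string(strtab, ".text") returns 1
 * and find_string(strtab, ".debug_info") returns 7, i.e. the offsets
 * expected in the sh_name and st_name fields.
 */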
static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere. Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents. We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

    if (0) {
        /* Enable this block to be able to debug the ELF image file creation.
           One can use readelf, objdump, or other inspection utilities. */
        g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
        FILE *f = fopen(jit, "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite. */
            }
            fclose(f);
        }
    }

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature. Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(const void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */
#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();