/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/translation-block.h"
#include "exec/tlb-common.h"
#include "tcg/startup.h"
#include "tcg/tcg-op-common.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "accel/tcg/perf.h"
#ifdef CONFIG_USER_ONLY
#include "exec/user/guest-base.h"
#endif
/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

typedef struct TCGLabelQemuLdst {
    bool is_ld;             /* qemu_ld: true, qemu_st: false */
    MemOpIdx oi;
    TCGType type;           /* result type of a load */
    TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
    TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
    TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
    TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
    const tcg_insn_unit *raddr;   /* addr of the next IR of qemu_ld/st IR */
    tcg_insn_unit *label_ptr[2];  /* label pointers to be updated */
    QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
} TCGLabelQemuLdst;
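/*
 * Descriptive note: each qemu_ld/qemu_st that needs an out-of-line slow
 * path gets one of the labels above.  When emitting the fast path, the
 * backend records the data/address registers and the branch patch points
 * in label_ptr[]; the helper call is then emitted after the TB body and
 * the recorded branches are patched to reach it, returning to raddr.
 */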
static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_tb_start(TCGContext *s);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

#ifndef CONFIG_USER_ONLY
#define guest_base ({ qemu_build_not_reached(); (uintptr_t)0; })
#endif
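/*
 * In system mode any use of guest_base in this file would be a bug, so
 * the macro above turns such a use into a build-time failure while still
 * parsing as an expression of pointer-sized integer type.
 */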
typedef struct TCGLdstHelperParam {
    TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg);
    unsigned ntmp;
    int tmp[3];
} TCGLdstHelperParam;

static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *l,
                                  bool load_sign, const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = {
    [MO_UB] = helper_ldub_mmu,
    [MO_SB] = helper_ldsb_mmu,
    [MO_UW] = helper_lduw_mmu,
    [MO_SW] = helper_ldsw_mmu,
    [MO_UL] = helper_ldul_mmu,
    [MO_UQ] = helper_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_ldsl_mmu,
    [MO_128] = helper_ld16_mmu,
#endif
};

static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
    [MO_8]  = helper_stb_mmu,
    [MO_16] = helper_stw_mmu,
    [MO_32] = helper_stl_mmu,
    [MO_64] = helper_stq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_128] = helper_st16_mmu,
#endif
};
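/*
 * The tables above are indexed by MemOp size (and sign, for loads), so a
 * backend can pick the slow-path helper straight from the opcode's memop,
 * e.g. qemu_ld_helpers[opc & MO_SSIZE].
 */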
typedef struct {
    MemOp atom;   /* lg2 bits of atomicity required */
    MemOp align;  /* lg2 bits of alignment to use */
} TCGAtomAlign;

static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
    __attribute__((unused));

#ifdef CONFIG_USER_ONLY
bool tcg_use_softmmu;
#endif
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;

const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
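/*
 * The tcg_outN/tcg_patchN helpers below append or overwrite an N-bit value
 * in the generated code stream.  They are guarded on
 * TCG_TARGET_INSN_UNIT_SIZE so each backend compiles only the variants
 * that match, or subdivide, its instruction unit.
 */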
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif
#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->branches);
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}

static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
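/*
 * Summary of the scheme above: a branch emitted before its label is
 * recorded on the label as a TCGRelocation by tcg_out_reloc; once all
 * code has been emitted, tcg_resolve_relocs rewrites every recorded site
 * via patch_reloc with the label's final address.
 */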
static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}

static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}

static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
{
    /*
     * Return the read-execute version of the pointer, for the benefit
     * of any pc-relative addressing mode.
     */
    return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}

static int __attribute__((unused))
tlb_mask_table_ofs(TCGContext *s, int which)
{
    return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
            sizeof(CPUNegativeOffsetState));
}
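/*
 * Note the result above is negative: CPUNegativeOffsetState is laid out
 * immediately before the architectural CPU state, so the TLB mask/table
 * pair is reached at a small negative offset from the env pointer held
 * in TCG_AREG0.
 */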
/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}

/*
 * Used by tcg_out_movext{1,2} to hold the arguments for tcg_out_movext.
 * By the time we arrive at tcg_out_movext1, @dst is always a TCGReg.
 *
 * However, tcg_out_helper_load_slots reuses this field to hold an
 * argument slot number (which may designate an argument register or an
 * argument stack slot), converting to TCGReg once all arguments that
 * are destined for the stack are processed.
 */
typedef struct TCGMovExtend {
    unsigned dst;
    TCGReg src;
    TCGType dst_type;
    TCGType src_type;
    MemOp src_ext;
} TCGMovExtend;
/**
 * tcg_out_movext -- move and extend
 * @s: tcg context
 * @dst_type: integral type for destination
 * @dst: destination register
 * @src_type: integral type for source
 * @src_ext: extension to apply to source
 * @src: source register
 *
 * Move or extend @src into @dst, depending on @src_ext and the types.
 */
static void tcg_out_movext(TCGContext *s, TCGType dst_type, TCGReg dst,
                           TCGType src_type, MemOp src_ext, TCGReg src)
{
    switch (src_ext) {
    case MO_UB:
        tcg_out_ext8u(s, dst, src);
        break;
    case MO_SB:
        tcg_out_ext8s(s, dst_type, dst, src);
        break;
    case MO_UW:
        tcg_out_ext16u(s, dst, src);
        break;
    case MO_SW:
        tcg_out_ext16s(s, dst_type, dst, src);
        break;
    case MO_UL:
    case MO_SL:
        if (dst_type == TCG_TYPE_I32) {
            if (src_type == TCG_TYPE_I32) {
                tcg_out_mov(s, TCG_TYPE_I32, dst, src);
            } else {
                tcg_out_extrl_i64_i32(s, dst, src);
            }
        } else if (src_type == TCG_TYPE_I32) {
            if (src_ext & MO_SIGN) {
                tcg_out_exts_i32_i64(s, dst, src);
            } else {
                tcg_out_extu_i32_i64(s, dst, src);
            }
        } else {
            if (src_ext & MO_SIGN) {
                tcg_out_ext32s(s, dst, src);
            } else {
                tcg_out_ext32u(s, dst, src);
            }
        }
        break;
    case MO_UQ:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        if (dst_type == TCG_TYPE_I32) {
            tcg_out_extrl_i64_i32(s, dst, src);
        } else {
            tcg_out_mov(s, TCG_TYPE_I64, dst, src);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/* Minor variations on a theme, using a structure. */
static void tcg_out_movext1_new_src(TCGContext *s, const TCGMovExtend *i,
                                    TCGReg src)
{
    tcg_out_movext(s, i->dst_type, i->dst, i->src_type, i->src_ext, src);
}

static void tcg_out_movext1(TCGContext *s, const TCGMovExtend *i)
{
    tcg_out_movext1_new_src(s, i, i->src);
}
/**
 * tcg_out_movext2 -- move and extend two pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for both @i1 and @i2, caring for overlap
 * between the sources and destinations.
 */
static void tcg_out_movext2(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;

    if (i1->dst != src2) {
        tcg_out_movext1(s, i1);
        tcg_out_movext1(s, i2);
        return;
    }
    if (i2->dst == src1) {
        TCGType src1_type = i1->src_type;
        TCGType src2_type = i2->src_type;

        if (tcg_out_xchg(s, MAX(src1_type, src2_type), src1, src2)) {
            /* The data is now in the correct registers, now extend. */
            src1 = i2->src;
            src2 = i1->src;
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, src1_type, scratch, src1);
            src1 = scratch;
        }
    }
    tcg_out_movext1_new_src(s, i2, src2);
    tcg_out_movext1_new_src(s, i1, src1);
}
/**
 * tcg_out_movext3 -- move and extend three pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @i3: third move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for all of @i1, @i2 and @i3, caring for overlap
 * between the sources and destinations.
 */
static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, const TCGMovExtend *i3,
                            int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;
    TCGReg src3 = i3->src;

    if (i1->dst != src2 && i1->dst != src3) {
        tcg_out_movext1(s, i1);
        tcg_out_movext2(s, i2, i3, scratch);
        return;
    }
    if (i2->dst != src1 && i2->dst != src3) {
        tcg_out_movext1(s, i2);
        tcg_out_movext2(s, i1, i3, scratch);
        return;
    }
    if (i3->dst != src1 && i3->dst != src2) {
        tcg_out_movext1(s, i3);
        tcg_out_movext2(s, i1, i2, scratch);
        return;
    }

    /*
     * There is a cycle.  Since there are only 3 nodes, the cycle is
     * either "clockwise" or "anti-clockwise", and can be solved with
     * a single scratch or two xchg.
     */
    if (i1->dst == src2 && i2->dst == src3 && i3->dst == src1) {
        /* "Clockwise" */
        if (tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2)) {
            tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i3);
            tcg_out_movext1(s, i2);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else if (i1->dst == src3 && i2->dst == src1 && i3->dst == src2) {
        /* "Anti-clockwise" */
        if (tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3)) {
            tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i2);
            tcg_out_movext1(s, i3);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else {
        g_assert_not_reached();
    }
}
#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;
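/*
 * Note the X-macro pattern here: "tcg-target-con-set.h" is included
 * several times with different definitions of the C_O*_I* macros -- once
 * above to build the enum, once below to build the matching
 * constraint-string array, and the macros are redefined once more so
 * that tcg-target.c.inc can return enumerators from tcg_target_op_def().
 */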
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4

/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4

/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"
#ifndef CONFIG_TCG_INTERPRETER
/* Validate CPUTLBDescFast placement. */
QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
                        sizeof(CPUNegativeOffsetState))
                  < MIN_TLB_MASK_TABLE_OFS);
#endif

static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called
 * tcg_context_init and registered the target's TCG globals) must register
 * with this function before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In system-mode each caller registers its context in tcg_ctxs[]. Note that
 * in system-mode tcg_ctxs[] does not track tcg_ctx_init, since the initial
 * context is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in system-mode keeps code that
 * iterates over the array (e.g. tcg_code_size()) the same for both
 * system/user mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
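/*
 * Note that tcg_pool_reset below does not free the normal chunks; they
 * stay on s->pool_first for reuse by the next translation.  Only the
 * oversized "large" allocations are returned to the system.
 */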
void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;

    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
/*
 * Create TCGHelperInfo structures for "tcg/tcg-ldst.h" functions,
 * akin to what "exec/helper-tcg.h" does with DEF_HELPER_FLAGS_N.
 * We only use these for layout in tcg_out_ld_helper_ret and
 * tcg_out_st_helper_args, and share them between several of
 * the helpers, with the end result that it's easier to build manually.
 */

#if TCG_TARGET_REG_BITS == 32
# define dh_typecode_ttl  dh_typecode_i32
#else
# define dh_typecode_ttl  dh_typecode_i64
#endif

static TCGHelperInfo info_helper_ld32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(ttl, 0)  /* return tcg_target_ulong */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i64, 0)  /* return uint64_t */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i128, 0) /* return Int128 */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* uint32_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i64, 3)  /* uint64_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i128, 3) /* Int128 data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};
#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    /*
     * libffi does not support __int128_t, so we have forced Int128
     * to use the structure definition instead of the builtin type.
     */
    static ffi_type *ffi_type_i128_elements[3] = {
        &ffi_type_uint64,
        &ffi_type_uint64,
        NULL
    };
    static ffi_type ffi_type_i128 = {
        .size = 16,
        .alignment = __alignof__(Int128),
        .type = FFI_TYPE_STRUCT,
        .elements = ffi_type_i128_elements,
    };

    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    case dh_typecode_i128:
        return &ffi_type_i128;
    }
    g_assert_not_reached();
}

static ffi_cif *init_ffi_layout(TCGHelperInfo *info)
{
    unsigned typemask = info->typemask;
    struct {
        ffi_cif cif;
        ffi_type *args[];
    } *ca;
    ffi_status status;
    int nargs;

    /* Ignoring the return type, find the last non-zero field. */
    nargs = 32 - clz32(typemask >> 3);
    nargs = DIV_ROUND_UP(nargs, 3);
    assert(nargs <= MAX_CALL_IARGS);

    ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
    ca->cif.rtype = typecode_to_ffi(typemask & 7);
    ca->cif.nargs = nargs;

    if (nargs != 0) {
        ca->cif.arg_types = ca->args;
        for (int j = 0; j < nargs; ++j) {
            int typecode = extract32(typemask, (j + 1) * 3, 3);
            ca->args[j] = typecode_to_ffi(typecode);
        }
    }

    status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                          ca->cif.rtype, ca->cif.arg_types);
    assert(status == FFI_OK);

    return &ca->cif;
}

#define HELPER_INFO_INIT(I)      (&(I)->cif)
#define HELPER_INFO_INIT_VAL(I)  init_ffi_layout(I)
#else
#define HELPER_INFO_INIT(I)      (&(I)->init)
#define HELPER_INFO_INIT_VAL(I)  1
#endif /* CONFIG_TCG_INTERPRETER */
static inline bool arg_slot_reg_p(unsigned arg_slot)
{
    /*
     * Split the sizeof away from the comparison to avoid Werror from
     * "unsigned < 0 is always false", when iarg_regs is empty.
     */
    unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
    return arg_slot < nreg;
}

static inline int arg_slot_stk_ofs(unsigned arg_slot)
{
    unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);

    tcg_debug_assert(stk_slot < max);
    return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
}
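/*
 * Argument slots are numbered with the register arguments first and the
 * stack words following, so a single index covers both: a slot below the
 * number of iarg registers is a register, and anything above maps to a
 * stack offset via arg_slot_stk_ofs.
 */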
typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;

static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}
static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}

static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}

static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
    int n = 128 / TCG_TARGET_REG_BITS;

    /* The first subindex carries the pointer. */
    layout_arg_1(cum, info, TCG_CALL_ARG_BY_REF);

    /*
     * The callee is allowed to clobber memory associated with
     * structure pass by-reference.  Therefore we must make copies.
     * Allocate space from "ref_slot", which will be adjusted to
     * follow the parameters on the stack.
     */
    loc[0].ref_slot = cum->ref_slot;

    /*
     * Subsequent words also go into the reference slot, but
     * do not accumulate into the regular arguments.
     */
    for (int i = 1; i < n; ++i) {
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_BY_REF_N,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .ref_slot = cum->ref_slot + i,
        };
    }
    cum->info_in_idx += n - 1;  /* i=0 accounted for in layout_arg_1 */
    cum->ref_slot += n;
}
static void init_call_layout(TCGHelperInfo *info)
{
    int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
    int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned typemask = info->typemask;
    unsigned typecode;
    TCGCumulativeArgs cum = { };

    /*
     * Parse and place any function return value.
     */
    typecode = typemask & 7;
    switch (typecode) {
    case dh_typecode_void:
        info->nr_out = 0;
        break;
    case dh_typecode_i32:
    case dh_typecode_s32:
    case dh_typecode_ptr:
        info->nr_out = 1;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    case dh_typecode_i64:
    case dh_typecode_s64:
        info->nr_out = 64 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_CALL_RET_NORMAL;
        /* Query the last register now to trigger any assert early. */
        tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
        break;
    case dh_typecode_i128:
        info->nr_out = 128 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_TARGET_CALL_RET_I128;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            /* Query the last register now to trigger any assert early. */
            tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
            break;
        case TCG_CALL_RET_BY_VEC:
            /* Query the single register now to trigger any assert early. */
            tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0);
            break;
        case TCG_CALL_RET_BY_REF:
            /*
             * Allocate the first argument to the output.
             * We don't need to store this anywhere, just make it
             * unavailable for use in the input loop below.
             */
            cum.arg_slot = 1;
            break;
        default:
            qemu_build_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Parse and place function arguments.
     */
    for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
        TCGCallArgumentKind kind;
        TCGType type;

        typecode = typemask & 7;
        switch (typecode) {
        case dh_typecode_i32:
        case dh_typecode_s32:
            type = TCG_TYPE_I32;
            break;
        case dh_typecode_i64:
        case dh_typecode_s64:
            type = TCG_TYPE_I64;
            break;
        case dh_typecode_ptr:
            type = TCG_TYPE_PTR;
            break;
        case dh_typecode_i128:
            type = TCG_TYPE_I128;
            break;
        default:
            g_assert_not_reached();
        }

        switch (type) {
        case TCG_TYPE_I32:
            switch (TCG_TARGET_CALL_ARG_I32) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                break;
            case TCG_CALL_ARG_EXTEND:
                kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
                layout_arg_1(&cum, info, kind);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I64:
            switch (TCG_TARGET_CALL_ARG_I64) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                if (TCG_TARGET_REG_BITS == 32) {
                    layout_arg_normal_n(&cum, info, 2);
                } else {
                    layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                }
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I128:
            switch (TCG_TARGET_CALL_ARG_I128) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_normal_n(&cum, info, 128 / TCG_TARGET_REG_BITS);
                break;
            case TCG_CALL_ARG_BY_REF:
                layout_arg_by_ref(&cum, info);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    info->nr_in = cum.info_in_idx;

    /* Validate that we didn't overrun the input array. */
    assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
    /* Validate the backend has enough argument space. */
    assert(cum.arg_slot <= max_reg_slots + max_stk_slots);

    /*
     * Relocate the "ref_slot" area to the end of the parameters.
     * Minimizing this stack offset helps code size for x86,
     * which has a signed 8-bit offset encoding.
     */
    if (cum.ref_slot != 0) {
        int ref_base = 0;

        if (cum.arg_slot > max_reg_slots) {
            int align = __alignof(Int128) / sizeof(tcg_target_long);

            ref_base = cum.arg_slot - max_reg_slots;
            if (align > 1) {
                ref_base = ROUND_UP(ref_base, align);
            }
        }
        assert(ref_base + cum.ref_slot <= max_stk_slots);
        ref_base += max_reg_slots;

        if (ref_base != 0) {
            for (int i = cum.info_in_idx - 1; i >= 0; --i) {
                TCGCallArgumentLoc *loc = &info->in[i];
                switch (loc->kind) {
                case TCG_CALL_ARG_BY_REF:
                case TCG_CALL_ARG_BY_REF_N:
                    loc->ref_slot += ref_base;
                    break;
                default:
                    break;
                }
            }
        }
    }
}
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    init_call_layout(&info_helper_ld32_mmu);
    init_call_layout(&info_helper_ld64_mmu);
    init_call_layout(&info_helper_ld128_mmu);
    init_call_layout(&info_helper_st32_mmu);
    init_call_layout(&info_helper_st64_mmu);
    init_call_layout(&info_helper_st128_mmu);

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In system-mode we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    tcg_env = temp_tcgv_ptr(ts);
}
void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
void tcg_prologue_init(void)
{
    TCGContext *s = tcg_ctx;
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);
    perf_report_prologue(s->code_gen_ptr, prologue_size);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
            if (s->data_gen_ptr) {
                size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
                size_t data_size = prologue_size - code_size;
                size_t i;

                disas(logfile, s->code_gen_ptr, code_size);

                for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint64_t *)(s->data_gen_ptr + i));
                    } else {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08x\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint32_t *)(s->data_gen_ptr + i));
                    }
                }
            } else {
                disas(logfile, s->code_gen_ptr, prologue_size);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif

    tcg_region_prologue_set(s);
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);

    tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
                     s->addr_type == TCG_TYPE_I64);

    tcg_debug_assert(s->insn_start_words > 0);
}
static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0;

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + 4;
        ts2->temp_subindex = 1;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int n;

    if (kind == TEMP_EBB) {
        int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);

        if (idx < TCG_MAX_TEMPS) {
            /* There is already an available temp with the right type.  */
            clear_bit(idx, s->free_temps[type].l);

            ts = &s->temps[idx];
            ts->temp_allocated = 1;
            tcg_debug_assert(ts->base_type == type);
            tcg_debug_assert(ts->kind == kind);
            return ts;
        }
    } else {
        tcg_debug_assert(kind == TEMP_TB);
    }

    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        n = 1;
        break;
    case TCG_TYPE_I64:
        n = 64 / TCG_TARGET_REG_BITS;
        break;
    case TCG_TYPE_I128:
        n = 128 / TCG_TARGET_REG_BITS;
        break;
    default:
        g_assert_not_reached();
    }

    ts = tcg_temp_alloc(s);
    ts->base_type = type;
    ts->temp_allocated = 1;
    ts->kind = kind;

    if (n == 1) {
        ts->type = type;
    } else {
        ts->type = TCG_TYPE_REG;

        for (int i = 1; i < n; ++i) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + i);
            ts2->base_type = type;
            ts2->type = TCG_TYPE_REG;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = i;
            ts2->kind = kind;
        }
    }
    return ts;
}
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;

    switch (ts->kind) {
    case TEMP_CONST:
    case TEMP_TB:
        /* Silently ignore free. */
        break;
    case TEMP_EBB:
        tcg_debug_assert(ts->temp_allocated != 0);
        ts->temp_allocated = 0;
        set_bit(temp_idx(ts), s->free_temps[ts->base_type].l);
        break;
    default:
        /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */
        g_assert_not_reached();
    }
}
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        int64_t *val_ptr;

        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + 1);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;

            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;

            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works.  Actual uses will
             * truncate the value to the low part.
             */
            ts[HOST_BIG_ENDIAN].val = val;
            ts[!HOST_BIG_ENDIAN].val = val >> 32;
            val_ptr = &ts[HOST_BIG_ENDIAN].val;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
            val_ptr = &ts->val;
        }
        g_hash_table_insert(h, val_ptr, ts);
    }

    return ts;
}
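/*
 * Constants are interned: one TEMP_CONST temp exists per (type, value)
 * pair for the life of the translation context, keyed in const_table by
 * the value stored in the temp itself, so repeated uses of the same
 * constant share a single temp.
 */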
TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}
#ifdef CONFIG_DEBUG_TCG
size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v - offsetof(TCGContext, temps);

    assert(o < sizeof(TCGTemp) * tcg_ctx->nb_temps);
    assert(o % sizeof(TCGTemp) == 0);

    return (void *)tcg_ctx + (uintptr_t)v;
}
#endif /* CONFIG_DEBUG_TCG */
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_goto_ptr:
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return true;

    case INDEX_op_qemu_st8_a32_i32:
    case INDEX_op_qemu_st8_a64_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_qemu_ld_a32_i128:
    case INDEX_op_qemu_ld_a64_i128:
    case INDEX_op_qemu_st_a32_i128:
    case INDEX_op_qemu_st_a64_i128:
        return TCG_TARGET_HAS_qemu_ldst_i128;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_negsetcond_i32:
        return TCG_TARGET_HAS_negsetcond_i32;
    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_negsetcond_i64:
        return TCG_TARGET_HAS_negsetcond_i64;
    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extr_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_nand_vec:
        return have_vec && TCG_TARGET_HAS_nand_vec;
    case INDEX_op_nor_vec:
        return have_vec && TCG_TARGET_HAS_nor_vec;
    case INDEX_op_eqv_vec:
        return have_vec && TCG_TARGET_HAS_eqv_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
2146 static TCGOp
*tcg_op_alloc(TCGOpcode opc
, unsigned nargs
);
2148 static void tcg_gen_callN(TCGHelperInfo
*info
, TCGTemp
*ret
, TCGTemp
**args
)
2150 TCGv_i64 extend_free
[MAX_CALL_IARGS
];
2153 int i
, n
, pi
= 0, total_args
;
2155 if (unlikely(g_once_init_enter(HELPER_INFO_INIT(info
)))) {
2156 init_call_layout(info
);
2157 g_once_init_leave(HELPER_INFO_INIT(info
), HELPER_INFO_INIT_VAL(info
));
2160 total_args
= info
->nr_out
+ info
->nr_in
+ 2;
2161 op
= tcg_op_alloc(INDEX_op_call
, total_args
);
2163 #ifdef CONFIG_PLUGIN
2164 /* Flag helpers that may affect guest state */
2165 if (tcg_ctx
->plugin_insn
&&
2166 !(info
->flags
& TCG_CALL_PLUGIN
) &&
2167 !(info
->flags
& TCG_CALL_NO_SIDE_EFFECTS
)) {
2168 tcg_ctx
->plugin_insn
->calls_helpers
= true;
2172 TCGOP_CALLO(op
) = n
= info
->nr_out
;
2175 tcg_debug_assert(ret
== NULL
);
2178 tcg_debug_assert(ret
!= NULL
);
2179 op
->args
[pi
++] = temp_arg(ret
);
2183 tcg_debug_assert(ret
!= NULL
);
2184 tcg_debug_assert(ret
->base_type
== ret
->type
+ ctz32(n
));
2185 tcg_debug_assert(ret
->temp_subindex
== 0);
2186 for (i
= 0; i
< n
; ++i
) {
2187 op
->args
[pi
++] = temp_arg(ret
+ i
);
2191 g_assert_not_reached();
2194 TCGOP_CALLI(op
) = n
= info
->nr_in
;
2195 for (i
= 0; i
< n
; i
++) {
2196 const TCGCallArgumentLoc
*loc
= &info
->in
[i
];
2197 TCGTemp
*ts
= args
[loc
->arg_idx
] + loc
->tmp_subindex
;
2199 switch (loc
->kind
) {
2200 case TCG_CALL_ARG_NORMAL
:
2201 case TCG_CALL_ARG_BY_REF
:
2202 case TCG_CALL_ARG_BY_REF_N
:
2203 op
->args
[pi
++] = temp_arg(ts
);
2206 case TCG_CALL_ARG_EXTEND_U
:
2207 case TCG_CALL_ARG_EXTEND_S
:
2209 TCGv_i64 temp
= tcg_temp_ebb_new_i64();
2210 TCGv_i32 orig
= temp_tcgv_i32(ts
);
2212 if (loc
->kind
== TCG_CALL_ARG_EXTEND_S
) {
2213 tcg_gen_ext_i32_i64(temp
, orig
);
2215 tcg_gen_extu_i32_i64(temp
, orig
);
2217 op
->args
[pi
++] = tcgv_i64_arg(temp
);
2218 extend_free
[n_extend
++] = temp
;
2223 g_assert_not_reached();
2226 op
->args
[pi
++] = (uintptr_t)info
->func
;
2227 op
->args
[pi
++] = (uintptr_t)info
;
2228 tcg_debug_assert(pi
== total_args
);
2230 QTAILQ_INSERT_TAIL(&tcg_ctx
->ops
, op
, link
);
2232 tcg_debug_assert(n_extend
< ARRAY_SIZE(extend_free
));
2233 for (i
= 0; i
< n_extend
; ++i
) {
2234 tcg_temp_free_i64(extend_free
[i
]);
2238 void tcg_gen_call0(TCGHelperInfo
*info
, TCGTemp
*ret
)
2240 tcg_gen_callN(info
, ret
, NULL
);
2243 void tcg_gen_call1(TCGHelperInfo
*info
, TCGTemp
*ret
, TCGTemp
*t1
)
2245 tcg_gen_callN(info
, ret
, &t1
);
2248 void tcg_gen_call2(TCGHelperInfo
*info
, TCGTemp
*ret
, TCGTemp
*t1
, TCGTemp
*t2
)
2250 TCGTemp
*args
[2] = { t1
, t2
};
2251 tcg_gen_callN(info
, ret
, args
);
2254 void tcg_gen_call3(TCGHelperInfo
*info
, TCGTemp
*ret
, TCGTemp
*t1
,
2255 TCGTemp
*t2
, TCGTemp
*t3
)
2257 TCGTemp
*args
[3] = { t1
, t2
, t3
};
2258 tcg_gen_callN(info
, ret
, args
);
2261 void tcg_gen_call4(TCGHelperInfo
*info
, TCGTemp
*ret
, TCGTemp
*t1
,
2262 TCGTemp
*t2
, TCGTemp
*t3
, TCGTemp
*t4
)
2264 TCGTemp
*args
[4] = { t1
, t2
, t3
, t4
};
2265 tcg_gen_callN(info
, ret
, args
);
2268 void tcg_gen_call5(TCGHelperInfo
*info
, TCGTemp
*ret
, TCGTemp
*t1
,
2269 TCGTemp
*t2
, TCGTemp
*t3
, TCGTemp
*t4
, TCGTemp
*t5
)
2271 TCGTemp
*args
[5] = { t1
, t2
, t3
, t4
, t5
};
2272 tcg_gen_callN(info
, ret
, args
);
2275 void tcg_gen_call6(TCGHelperInfo
*info
, TCGTemp
*ret
, TCGTemp
*t1
, TCGTemp
*t2
,
2276 TCGTemp
*t3
, TCGTemp
*t4
, TCGTemp
*t5
, TCGTemp
*t6
)
2278 TCGTemp
*args
[6] = { t1
, t2
, t3
, t4
, t5
, t6
};
2279 tcg_gen_callN(info
, ret
, args
);
2282 void tcg_gen_call7(TCGHelperInfo
*info
, TCGTemp
*ret
, TCGTemp
*t1
,
2283 TCGTemp
*t2
, TCGTemp
*t3
, TCGTemp
*t4
,
2284 TCGTemp
*t5
, TCGTemp
*t6
, TCGTemp
*t7
)
2286 TCGTemp
*args
[7] = { t1
, t2
, t3
, t4
, t5
, t6
, t7
};
2287 tcg_gen_callN(info
, ret
, args
);
2290 static void tcg_reg_alloc_start(TCGContext
*s
)
2294 for (i
= 0, n
= s
->nb_temps
; i
< n
; i
++) {
2295 TCGTemp
*ts
= &s
->temps
[i
];
2296 TCGTempVal val
= TEMP_VAL_MEM
;
2300 val
= TEMP_VAL_CONST
;
2308 val
= TEMP_VAL_DEAD
;
2311 ts
->mem_allocated
= 0;
2314 g_assert_not_reached();
2319 memset(s
->reg_to_temp
, 0, sizeof(s
->reg_to_temp
));
2322 static char *tcg_get_arg_str_ptr(TCGContext
*s
, char *buf
, int buf_size
,
2325 int idx
= temp_idx(ts
);
2330 pstrcpy(buf
, buf_size
, ts
->name
);
2333 snprintf(buf
, buf_size
, "loc%d", idx
- s
->nb_globals
);
2336 snprintf(buf
, buf_size
, "tmp%d", idx
- s
->nb_globals
);
2341 snprintf(buf
, buf_size
, "$0x%x", (int32_t)ts
->val
);
2343 #if TCG_TARGET_REG_BITS > 32
2345 snprintf(buf
, buf_size
, "$0x%" PRIx64
, ts
->val
);
2351 snprintf(buf
, buf_size
, "v%d$0x%" PRIx64
,
2352 64 << (ts
->type
- TCG_TYPE_V64
), ts
->val
);
2355 g_assert_not_reached();
2362 static char *tcg_get_arg_str(TCGContext
*s
, char *buf
,
2363 int buf_size
, TCGArg arg
)
2365 return tcg_get_arg_str_ptr(s
, buf
, buf_size
, arg_temp(arg
));
2368 static const char * const cond_name
[] =
2370 [TCG_COND_NEVER
] = "never",
2371 [TCG_COND_ALWAYS
] = "always",
2372 [TCG_COND_EQ
] = "eq",
2373 [TCG_COND_NE
] = "ne",
2374 [TCG_COND_LT
] = "lt",
2375 [TCG_COND_GE
] = "ge",
2376 [TCG_COND_LE
] = "le",
2377 [TCG_COND_GT
] = "gt",
2378 [TCG_COND_LTU
] = "ltu",
2379 [TCG_COND_GEU
] = "geu",
2380 [TCG_COND_LEU
] = "leu",
2381 [TCG_COND_GTU
] = "gtu"
2384 static const char * const ldst_name
[(MO_BSWAP
| MO_SSIZE
) + 1] =
2398 [MO_128
+ MO_BE
] = "beo",
2399 [MO_128
+ MO_LE
] = "leo",
2402 static const char * const alignment_name
[(MO_AMASK
>> MO_ASHIFT
) + 1] = {
2403 [MO_UNALN
>> MO_ASHIFT
] = "un+",
2404 [MO_ALIGN
>> MO_ASHIFT
] = "al+",
2405 [MO_ALIGN_2
>> MO_ASHIFT
] = "al2+",
2406 [MO_ALIGN_4
>> MO_ASHIFT
] = "al4+",
2407 [MO_ALIGN_8
>> MO_ASHIFT
] = "al8+",
2408 [MO_ALIGN_16
>> MO_ASHIFT
] = "al16+",
2409 [MO_ALIGN_32
>> MO_ASHIFT
] = "al32+",
2410 [MO_ALIGN_64
>> MO_ASHIFT
] = "al64+",
2413 static const char * const atom_name
[(MO_ATOM_MASK
>> MO_ATOM_SHIFT
) + 1] = {
2414 [MO_ATOM_IFALIGN
>> MO_ATOM_SHIFT
] = "",
2415 [MO_ATOM_IFALIGN_PAIR
>> MO_ATOM_SHIFT
] = "pair+",
2416 [MO_ATOM_WITHIN16
>> MO_ATOM_SHIFT
] = "w16+",
2417 [MO_ATOM_WITHIN16_PAIR
>> MO_ATOM_SHIFT
] = "w16p+",
2418 [MO_ATOM_SUBALIGN
>> MO_ATOM_SHIFT
] = "sub+",
2419 [MO_ATOM_NONE
>> MO_ATOM_SHIFT
] = "noat+",
2422 static const char bswap_flag_name
[][6] = {
2423 [TCG_BSWAP_IZ
] = "iz",
2424 [TCG_BSWAP_OZ
] = "oz",
2425 [TCG_BSWAP_OS
] = "os",
2426 [TCG_BSWAP_IZ
| TCG_BSWAP_OZ
] = "iz,oz",
2427 [TCG_BSWAP_IZ
| TCG_BSWAP_OS
] = "iz,os",
2430 static inline bool tcg_regset_single(TCGRegSet d
)
2432 return (d
& (d
- 1)) == 0;
2435 static inline TCGReg
tcg_regset_first(TCGRegSet d
)
2437 if (TCG_TARGET_NB_REGS
<= 32) {
2444 /* Return only the number of characters output -- no error return. */
2445 #define ne_fprintf(...) \
2446 ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
2448 static void tcg_dump_ops(TCGContext
*s
, FILE *f
, bool have_prefs
)
2453 QTAILQ_FOREACH(op
, &s
->ops
, link
) {
2454 int i
, k
, nb_oargs
, nb_iargs
, nb_cargs
;
2455 const TCGOpDef
*def
;
2460 def
= &tcg_op_defs
[c
];
2462 if (c
== INDEX_op_insn_start
) {
2464 col
+= ne_fprintf(f
, "\n ----");
2466 for (i
= 0, k
= s
->insn_start_words
; i
< k
; ++i
) {
2467 col
+= ne_fprintf(f
, " %016" PRIx64
,
2468 tcg_get_insn_start_param(op
, i
));
2470 } else if (c
== INDEX_op_call
) {
2471 const TCGHelperInfo
*info
= tcg_call_info(op
);
2472 void *func
= tcg_call_func(op
);
2474 /* variable number of arguments */
2475 nb_oargs
= TCGOP_CALLO(op
);
2476 nb_iargs
= TCGOP_CALLI(op
);
2477 nb_cargs
= def
->nb_cargs
;
2479 col
+= ne_fprintf(f
, " %s ", def
->name
);
2482 * Print the function name from TCGHelperInfo, if available.
2483 * Note that plugins have a template function for the info,
2484 * but the actual function pointer comes from the plugin.
2486 if (func
== info
->func
) {
2487 col
+= ne_fprintf(f
, "%s", info
->name
);
2489 col
+= ne_fprintf(f
, "plugin(%p)", func
);
2492 col
+= ne_fprintf(f
, ",$0x%x,$%d", info
->flags
, nb_oargs
);
2493 for (i
= 0; i
< nb_oargs
; i
++) {
2494 col
+= ne_fprintf(f
, ",%s", tcg_get_arg_str(s
, buf
, sizeof(buf
),
2497 for (i
= 0; i
< nb_iargs
; i
++) {
2498 TCGArg arg
= op
->args
[nb_oargs
+ i
];
2499 const char *t
= tcg_get_arg_str(s
, buf
, sizeof(buf
), arg
);
2500 col
+= ne_fprintf(f
, ",%s", t
);
2503 col
+= ne_fprintf(f
, " %s ", def
->name
);
2505 nb_oargs
= def
->nb_oargs
;
2506 nb_iargs
= def
->nb_iargs
;
2507 nb_cargs
= def
->nb_cargs
;
2509 if (def
->flags
& TCG_OPF_VECTOR
) {
2510 col
+= ne_fprintf(f
, "v%d,e%d,", 64 << TCGOP_VECL(op
),
2511 8 << TCGOP_VECE(op
));
2515 for (i
= 0; i
< nb_oargs
; i
++) {
2516 const char *sep
= k
? "," : "";
2517 col
+= ne_fprintf(f
, "%s%s", sep
,
2518 tcg_get_arg_str(s
, buf
, sizeof(buf
),
2521 for (i
= 0; i
< nb_iargs
; i
++) {
2522 const char *sep
= k
? "," : "";
2523 col
+= ne_fprintf(f
, "%s%s", sep
,
2524 tcg_get_arg_str(s
, buf
, sizeof(buf
),
2528 case INDEX_op_brcond_i32
:
2529 case INDEX_op_setcond_i32
:
2530 case INDEX_op_negsetcond_i32
:
2531 case INDEX_op_movcond_i32
:
2532 case INDEX_op_brcond2_i32
:
2533 case INDEX_op_setcond2_i32
:
2534 case INDEX_op_brcond_i64
:
2535 case INDEX_op_setcond_i64
:
2536 case INDEX_op_negsetcond_i64
:
2537 case INDEX_op_movcond_i64
:
2538 case INDEX_op_cmp_vec
:
2539 case INDEX_op_cmpsel_vec
:
2540 if (op
->args
[k
] < ARRAY_SIZE(cond_name
)
2541 && cond_name
[op
->args
[k
]]) {
2542 col
+= ne_fprintf(f
, ",%s", cond_name
[op
->args
[k
++]]);
2544 col
+= ne_fprintf(f
, ",$0x%" TCG_PRIlx
, op
->args
[k
++]);
2548 case INDEX_op_qemu_ld_a32_i32
:
2549 case INDEX_op_qemu_ld_a64_i32
:
2550 case INDEX_op_qemu_st_a32_i32
:
2551 case INDEX_op_qemu_st_a64_i32
:
2552 case INDEX_op_qemu_st8_a32_i32
:
2553 case INDEX_op_qemu_st8_a64_i32
:
2554 case INDEX_op_qemu_ld_a32_i64
:
2555 case INDEX_op_qemu_ld_a64_i64
:
2556 case INDEX_op_qemu_st_a32_i64
:
2557 case INDEX_op_qemu_st_a64_i64
:
2558 case INDEX_op_qemu_ld_a32_i128
:
2559 case INDEX_op_qemu_ld_a64_i128
:
2560 case INDEX_op_qemu_st_a32_i128
:
2561 case INDEX_op_qemu_st_a64_i128
:
2563 const char *s_al
, *s_op
, *s_at
;
2564 MemOpIdx oi
= op
->args
[k
++];
2565 MemOp mop
= get_memop(oi
);
2566 unsigned ix
= get_mmuidx(oi
);
2568 s_al
= alignment_name
[(mop
& MO_AMASK
) >> MO_ASHIFT
];
2569 s_op
= ldst_name
[mop
& (MO_BSWAP
| MO_SSIZE
)];
2570 s_at
= atom_name
[(mop
& MO_ATOM_MASK
) >> MO_ATOM_SHIFT
];
2571 mop
&= ~(MO_AMASK
| MO_BSWAP
| MO_SSIZE
| MO_ATOM_MASK
);
2573 /* If all fields are accounted for, print symbolically. */
2574 if (!mop
&& s_al
&& s_op
&& s_at
) {
2575 col
+= ne_fprintf(f
, ",%s%s%s,%u",
2576 s_at
, s_al
, s_op
, ix
);
2578 mop
= get_memop(oi
);
2579 col
+= ne_fprintf(f
, ",$0x%x,%u", mop
, ix
);
2584 case INDEX_op_bswap16_i32
:
2585 case INDEX_op_bswap16_i64
:
2586 case INDEX_op_bswap32_i32
:
2587 case INDEX_op_bswap32_i64
:
2588 case INDEX_op_bswap64_i64
:
2590 TCGArg flags
= op
->args
[k
];
2591 const char *name
= NULL
;
2593 if (flags
< ARRAY_SIZE(bswap_flag_name
)) {
2594 name
= bswap_flag_name
[flags
];
2597 col
+= ne_fprintf(f
, ",%s", name
);
2599 col
+= ne_fprintf(f
, ",$0x%" TCG_PRIlx
, flags
);
2609 case INDEX_op_set_label
:
2611 case INDEX_op_brcond_i32
:
2612 case INDEX_op_brcond_i64
:
2613 case INDEX_op_brcond2_i32
:
2614 col
+= ne_fprintf(f
, "%s$L%d", k
? "," : "",
2615 arg_label(op
->args
[k
])->id
);
2620 TCGBar membar
= op
->args
[k
];
2621 const char *b_op
, *m_op
;
2623 switch (membar
& TCG_BAR_SC
) {
2637 g_assert_not_reached();
2640 switch (membar
& TCG_MO_ALL
) {
2656 case TCG_MO_LD_LD
| TCG_MO_LD_ST
:
2659 case TCG_MO_LD_LD
| TCG_MO_ST_LD
:
2662 case TCG_MO_LD_LD
| TCG_MO_ST_ST
:
2665 case TCG_MO_LD_ST
| TCG_MO_ST_LD
:
2668 case TCG_MO_LD_ST
| TCG_MO_ST_ST
:
2671 case TCG_MO_ST_LD
| TCG_MO_ST_ST
:
2674 case TCG_MO_LD_LD
| TCG_MO_LD_ST
| TCG_MO_ST_LD
:
2677 case TCG_MO_LD_LD
| TCG_MO_LD_ST
| TCG_MO_ST_ST
:
2680 case TCG_MO_LD_LD
| TCG_MO_ST_LD
| TCG_MO_ST_ST
:
2683 case TCG_MO_LD_ST
| TCG_MO_ST_LD
| TCG_MO_ST_ST
:
2690 g_assert_not_reached();
2693 col
+= ne_fprintf(f
, "%s%s:%s", (k
? "," : ""), b_op
, m_op
);
2700 for (; i
< nb_cargs
; i
++, k
++) {
2701 col
+= ne_fprintf(f
, "%s$0x%" TCG_PRIlx
, k
? "," : "",
2706 if (have_prefs
|| op
->life
) {
2707 for (; col
< 40; ++col
) {
2713 unsigned life
= op
->life
;
2715 if (life
& (SYNC_ARG
* 3)) {
2716 ne_fprintf(f
, " sync:");
2717 for (i
= 0; i
< 2; ++i
) {
2718 if (life
& (SYNC_ARG
<< i
)) {
2719 ne_fprintf(f
, " %d", i
);
2725 ne_fprintf(f
, " dead:");
2726 for (i
= 0; life
; ++i
, life
>>= 1) {
2728 ne_fprintf(f
, " %d", i
);
2735 for (i
= 0; i
< nb_oargs
; ++i
) {
2736 TCGRegSet set
= output_pref(op
, i
);
2739 ne_fprintf(f
, " pref=");
2744 ne_fprintf(f
, "none");
2745 } else if (set
== MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS
)) {
2746 ne_fprintf(f
, "all");
2747 #ifdef CONFIG_DEBUG_TCG
2748 } else if (tcg_regset_single(set
)) {
2749 TCGReg reg
= tcg_regset_first(set
);
2750 ne_fprintf(f
, "%s", tcg_target_reg_names
[reg
]);
2752 } else if (TCG_TARGET_NB_REGS
<= 32) {
2753 ne_fprintf(f
, "0x%x", (uint32_t)set
);
2755 ne_fprintf(f
, "0x%" PRIx64
, (uint64_t)set
);
2764 /* we give more priority to constraints with less registers */
2765 static int get_constraint_priority(const TCGOpDef
*def
, int k
)
2767 const TCGArgConstraint
*arg_ct
= &def
->args_ct
[k
];
2768 int n
= ctpop64(arg_ct
->regs
);
2771 * Sort constraints of a single register first, which includes output
2772 * aliases (which must exactly match the input already allocated).
2774 if (n
== 1 || arg_ct
->oalias
) {
2779 * Sort register pairs next, first then second immediately after.
2780 * Arbitrarily sort multiple pairs by the index of the first reg;
2781 * there shouldn't be many pairs.
2783 switch (arg_ct
->pair
) {
2788 return (arg_ct
->pair_index
+ 1) * 2 - 1;
2791 /* Finally, sort by decreasing register count. */
2796 /* sort from highest priority to lowest */
2797 static void sort_constraints(TCGOpDef
*def
, int start
, int n
)
2800 TCGArgConstraint
*a
= def
->args_ct
;
2802 for (i
= 0; i
< n
; i
++) {
2803 a
[start
+ i
].sort_index
= start
+ i
;
2808 for (i
= 0; i
< n
- 1; i
++) {
2809 for (j
= i
+ 1; j
< n
; j
++) {
2810 int p1
= get_constraint_priority(def
, a
[start
+ i
].sort_index
);
2811 int p2
= get_constraint_priority(def
, a
[start
+ j
].sort_index
);
2813 int tmp
= a
[start
+ i
].sort_index
;
2814 a
[start
+ i
].sort_index
= a
[start
+ j
].sort_index
;
2815 a
[start
+ j
].sort_index
= tmp
;
2821 static void process_op_defs(TCGContext
*s
)
2825 for (op
= 0; op
< NB_OPS
; op
++) {
2826 TCGOpDef
*def
= &tcg_op_defs
[op
];
2827 const TCGTargetOpDef
*tdefs
;
2828 bool saw_alias_pair
= false;
2829 int i
, o
, i2
, o2
, nb_args
;
2831 if (def
->flags
& TCG_OPF_NOT_PRESENT
) {
2835 nb_args
= def
->nb_iargs
+ def
->nb_oargs
;
2841 * Macro magic should make it impossible, but double-check that
2842 * the array index is in range. Since the signness of an enum
2843 * is implementation defined, force the result to unsigned.
2845 unsigned con_set
= tcg_target_op_def(op
);
2846 tcg_debug_assert(con_set
< ARRAY_SIZE(constraint_sets
));
2847 tdefs
= &constraint_sets
[con_set
];
2849 for (i
= 0; i
< nb_args
; i
++) {
2850 const char *ct_str
= tdefs
->args_ct_str
[i
];
2851 bool input_p
= i
>= def
->nb_oargs
;
2853 /* Incomplete TCGTargetOpDef entry. */
2854 tcg_debug_assert(ct_str
!= NULL
);
2859 tcg_debug_assert(input_p
);
2860 tcg_debug_assert(o
< def
->nb_oargs
);
2861 tcg_debug_assert(def
->args_ct
[o
].regs
!= 0);
2862 tcg_debug_assert(!def
->args_ct
[o
].oalias
);
2863 def
->args_ct
[i
] = def
->args_ct
[o
];
2864 /* The output sets oalias. */
2865 def
->args_ct
[o
].oalias
= 1;
2866 def
->args_ct
[o
].alias_index
= i
;
2867 /* The input sets ialias. */
2868 def
->args_ct
[i
].ialias
= 1;
2869 def
->args_ct
[i
].alias_index
= o
;
2870 if (def
->args_ct
[i
].pair
) {
2871 saw_alias_pair
= true;
2873 tcg_debug_assert(ct_str
[1] == '\0');
2877 tcg_debug_assert(!input_p
);
2878 def
->args_ct
[i
].newreg
= true;
2882 case 'p': /* plus */
2883 /* Allocate to the register after the previous. */
2884 tcg_debug_assert(i
> (input_p
? def
->nb_oargs
: 0));
2886 tcg_debug_assert(!def
->args_ct
[o
].pair
);
2887 tcg_debug_assert(!def
->args_ct
[o
].ct
);
2888 def
->args_ct
[i
] = (TCGArgConstraint
){
2891 .regs
= def
->args_ct
[o
].regs
<< 1,
2893 def
->args_ct
[o
].pair
= 1;
2894 def
->args_ct
[o
].pair_index
= i
;
2895 tcg_debug_assert(ct_str
[1] == '\0');
2898 case 'm': /* minus */
2899 /* Allocate to the register before the previous. */
2900 tcg_debug_assert(i
> (input_p
? def
->nb_oargs
: 0));
2902 tcg_debug_assert(!def
->args_ct
[o
].pair
);
2903 tcg_debug_assert(!def
->args_ct
[o
].ct
);
2904 def
->args_ct
[i
] = (TCGArgConstraint
){
2907 .regs
= def
->args_ct
[o
].regs
>> 1,
2909 def
->args_ct
[o
].pair
= 2;
2910 def
->args_ct
[o
].pair_index
= i
;
2911 tcg_debug_assert(ct_str
[1] == '\0');
2918 def
->args_ct
[i
].ct
|= TCG_CT_CONST
;
2921 /* Include all of the target-specific constraints. */
2924 #define CONST(CASE, MASK) \
2925 case CASE: def->args_ct[i].ct |= MASK; break;
2926 #define REGS(CASE, MASK) \
2927 case CASE: def->args_ct[i].regs |= MASK; break;
2929 #include "tcg-target-con-str.h"
2938 /* Typo in TCGTargetOpDef constraint. */
2939 g_assert_not_reached();
2941 } while (*++ct_str
!= '\0');
2944 /* TCGTargetOpDef entry with too much information? */
2945 tcg_debug_assert(i
== TCG_MAX_OP_ARGS
|| tdefs
->args_ct_str
[i
] == NULL
);
2948 * Fix up output pairs that are aliased with inputs.
2949 * When we created the alias, we copied pair from the output.
2950 * There are three cases:
2951 * (1a) Pairs of inputs alias pairs of outputs.
2952 * (1b) One input aliases the first of a pair of outputs.
2953 * (2) One input aliases the second of a pair of outputs.
2955 * Case 1a is handled by making sure that the pair_index'es are
2956 * properly updated so that they appear the same as a pair of inputs.
2958 * Case 1b is handled by setting the pair_index of the input to
2959 * itself, simply so it doesn't point to an unrelated argument.
2960 * Since we don't encounter the "second" during the input allocation
2961 * phase, nothing happens with the second half of the input pair.
2963 * Case 2 is handled by setting the second input to pair=3, the
2964 * first output to pair=3, and the pair_index'es to match.
2966 if (saw_alias_pair
) {
2967 for (i
= def
->nb_oargs
; i
< nb_args
; i
++) {
2969 * Since [0-9pm] must be alone in the constraint string,
2970 * the only way they can both be set is if the pair comes
2971 * from the output alias.
2973 if (!def
->args_ct
[i
].ialias
) {
2976 switch (def
->args_ct
[i
].pair
) {
2980 o
= def
->args_ct
[i
].alias_index
;
2981 o2
= def
->args_ct
[o
].pair_index
;
2982 tcg_debug_assert(def
->args_ct
[o
].pair
== 1);
2983 tcg_debug_assert(def
->args_ct
[o2
].pair
== 2);
2984 if (def
->args_ct
[o2
].oalias
) {
2986 i2
= def
->args_ct
[o2
].alias_index
;
2987 tcg_debug_assert(def
->args_ct
[i2
].pair
== 2);
2988 def
->args_ct
[i2
].pair_index
= i
;
2989 def
->args_ct
[i
].pair_index
= i2
;
2992 def
->args_ct
[i
].pair_index
= i
;
2996 o
= def
->args_ct
[i
].alias_index
;
2997 o2
= def
->args_ct
[o
].pair_index
;
2998 tcg_debug_assert(def
->args_ct
[o
].pair
== 2);
2999 tcg_debug_assert(def
->args_ct
[o2
].pair
== 1);
3000 if (def
->args_ct
[o2
].oalias
) {
3002 i2
= def
->args_ct
[o2
].alias_index
;
3003 tcg_debug_assert(def
->args_ct
[i2
].pair
== 1);
3004 def
->args_ct
[i2
].pair_index
= i
;
3005 def
->args_ct
[i
].pair_index
= i2
;
3008 def
->args_ct
[i
].pair
= 3;
3009 def
->args_ct
[o2
].pair
= 3;
3010 def
->args_ct
[i
].pair_index
= o2
;
3011 def
->args_ct
[o2
].pair_index
= i
;
3015 g_assert_not_reached();
3020 /* sort the constraints (XXX: this is just an heuristic) */
3021 sort_constraints(def
, 0, def
->nb_oargs
);
3022 sort_constraints(def
, def
->nb_oargs
, def
->nb_iargs
);
3026 static void remove_label_use(TCGOp
*op
, int idx
)
3028 TCGLabel
*label
= arg_label(op
->args
[idx
]);
3031 QSIMPLEQ_FOREACH(use
, &label
->branches
, next
) {
3032 if (use
->op
== op
) {
3033 QSIMPLEQ_REMOVE(&label
->branches
, use
, TCGLabelUse
, next
);
3037 g_assert_not_reached();
3040 void tcg_op_remove(TCGContext
*s
, TCGOp
*op
)
3044 remove_label_use(op
, 0);
3046 case INDEX_op_brcond_i32
:
3047 case INDEX_op_brcond_i64
:
3048 remove_label_use(op
, 3);
3050 case INDEX_op_brcond2_i32
:
3051 remove_label_use(op
, 5);
3057 QTAILQ_REMOVE(&s
->ops
, op
, link
);
3058 QTAILQ_INSERT_TAIL(&s
->free_ops
, op
, link
);
3062 void tcg_remove_ops_after(TCGOp
*op
)
3064 TCGContext
*s
= tcg_ctx
;
3067 TCGOp
*last
= tcg_last_op();
3071 tcg_op_remove(s
, last
);
3075 static TCGOp
*tcg_op_alloc(TCGOpcode opc
, unsigned nargs
)
3077 TCGContext
*s
= tcg_ctx
;
3080 if (unlikely(!QTAILQ_EMPTY(&s
->free_ops
))) {
3081 QTAILQ_FOREACH(op
, &s
->free_ops
, link
) {
3082 if (nargs
<= op
->nargs
) {
3083 QTAILQ_REMOVE(&s
->free_ops
, op
, link
);
3090 /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
3091 nargs
= MAX(4, nargs
);
3092 op
= tcg_malloc(sizeof(TCGOp
) + sizeof(TCGArg
) * nargs
);
3095 memset(op
, 0, offsetof(TCGOp
, link
));
3099 /* Check for bitfield overflow. */
3100 tcg_debug_assert(op
->nargs
== nargs
);
3106 TCGOp
*tcg_emit_op(TCGOpcode opc
, unsigned nargs
)
3108 TCGOp
*op
= tcg_op_alloc(opc
, nargs
);
3109 QTAILQ_INSERT_TAIL(&tcg_ctx
->ops
, op
, link
);
3113 TCGOp
*tcg_op_insert_before(TCGContext
*s
, TCGOp
*old_op
,
3114 TCGOpcode opc
, unsigned nargs
)
3116 TCGOp
*new_op
= tcg_op_alloc(opc
, nargs
);
3117 QTAILQ_INSERT_BEFORE(old_op
, new_op
, link
);
3121 TCGOp
*tcg_op_insert_after(TCGContext
*s
, TCGOp
*old_op
,
3122 TCGOpcode opc
, unsigned nargs
)
3124 TCGOp
*new_op
= tcg_op_alloc(opc
, nargs
);
3125 QTAILQ_INSERT_AFTER(&s
->ops
, old_op
, new_op
, link
);
3129 static void move_label_uses(TCGLabel
*to
, TCGLabel
*from
)
3133 QSIMPLEQ_FOREACH(u
, &from
->branches
, next
) {
3137 op
->args
[0] = label_arg(to
);
3139 case INDEX_op_brcond_i32
:
3140 case INDEX_op_brcond_i64
:
3141 op
->args
[3] = label_arg(to
);
3143 case INDEX_op_brcond2_i32
:
3144 op
->args
[5] = label_arg(to
);
3147 g_assert_not_reached();
3151 QSIMPLEQ_CONCAT(&to
->branches
, &from
->branches
);
3154 /* Reachable analysis : remove unreachable code. */
3155 static void __attribute__((noinline
))
3156 reachable_code_pass(TCGContext
*s
)
3158 TCGOp
*op
, *op_next
, *op_prev
;
3161 QTAILQ_FOREACH_SAFE(op
, &s
->ops
, link
, op_next
) {
3166 case INDEX_op_set_label
:
3167 label
= arg_label(op
->args
[0]);
3170 * Note that the first op in the TB is always a load,
3171 * so there is always something before a label.
3173 op_prev
= QTAILQ_PREV(op
, link
);
3176 * If we find two sequential labels, move all branches to
3177 * reference the second label and remove the first label.
3178 * Do this before branch to next optimization, so that the
3179 * middle label is out of the way.
3181 if (op_prev
->opc
== INDEX_op_set_label
) {
3182 move_label_uses(label
, arg_label(op_prev
->args
[0]));
3183 tcg_op_remove(s
, op_prev
);
3184 op_prev
= QTAILQ_PREV(op
, link
);
3188 * Optimization can fold conditional branches to unconditional.
3189 * If we find a label which is preceded by an unconditional
3190 * branch to next, remove the branch. We couldn't do this when
3191 * processing the branch because any dead code between the branch
3192 * and label had not yet been removed.
3194 if (op_prev
->opc
== INDEX_op_br
&&
3195 label
== arg_label(op_prev
->args
[0])) {
3196 tcg_op_remove(s
, op_prev
);
3197 /* Fall through means insns become live again. */
3201 if (QSIMPLEQ_EMPTY(&label
->branches
)) {
3203 * While there is an occasional backward branch, virtually
3204 * all branches generated by the translators are forward.
3205 * Which means that generally we will have already removed
3206 * all references to the label that will be, and there is
3207 * little to be gained by iterating.
3211 /* Once we see a label, insns become live again. */
3218 case INDEX_op_exit_tb
:
3219 case INDEX_op_goto_ptr
:
3220 /* Unconditional branches; everything following is dead. */
3225 /* Notice noreturn helper calls, raising exceptions. */
3226 if (tcg_call_flags(op
) & TCG_CALL_NO_RETURN
) {
3231 case INDEX_op_insn_start
:
3232 /* Never remove -- we need to keep these for unwind. */
3241 tcg_op_remove(s
, op
);
3249 #define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
3250 #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
3252 /* For liveness_pass_1, the register preferences for a given temp. */
3253 static inline TCGRegSet
*la_temp_pref(TCGTemp
*ts
)
3255 return ts
->state_ptr
;
3258 /* For liveness_pass_1, reset the preferences for a given temp to the
3259 * maximal regset for its type.
3261 static inline void la_reset_pref(TCGTemp
*ts
)
3264 = (ts
->state
== TS_DEAD
? 0 : tcg_target_available_regs
[ts
->type
]);
3267 /* liveness analysis: end of function: all temps are dead, and globals
3268 should be in memory. */
3269 static void la_func_end(TCGContext
*s
, int ng
, int nt
)
3273 for (i
= 0; i
< ng
; ++i
) {
3274 s
->temps
[i
].state
= TS_DEAD
| TS_MEM
;
3275 la_reset_pref(&s
->temps
[i
]);
3277 for (i
= ng
; i
< nt
; ++i
) {
3278 s
->temps
[i
].state
= TS_DEAD
;
3279 la_reset_pref(&s
->temps
[i
]);
3283 /* liveness analysis: end of basic block: all temps are dead, globals
3284 and local temps should be in memory. */
3285 static void la_bb_end(TCGContext
*s
, int ng
, int nt
)
3289 for (i
= 0; i
< nt
; ++i
) {
3290 TCGTemp
*ts
= &s
->temps
[i
];
3297 state
= TS_DEAD
| TS_MEM
;
3304 g_assert_not_reached();
3311 /* liveness analysis: sync globals back to memory. */
3312 static void la_global_sync(TCGContext
*s
, int ng
)
3316 for (i
= 0; i
< ng
; ++i
) {
3317 int state
= s
->temps
[i
].state
;
3318 s
->temps
[i
].state
= state
| TS_MEM
;
3319 if (state
== TS_DEAD
) {
3320 /* If the global was previously dead, reset prefs. */
3321 la_reset_pref(&s
->temps
[i
]);
3327 * liveness analysis: conditional branch: all temps are dead unless
3328 * explicitly live-across-conditional-branch, globals and local temps
3331 static void la_bb_sync(TCGContext
*s
, int ng
, int nt
)
3333 la_global_sync(s
, ng
);
3335 for (int i
= ng
; i
< nt
; ++i
) {
3336 TCGTemp
*ts
= &s
->temps
[i
];
3342 ts
->state
= state
| TS_MEM
;
3343 if (state
!= TS_DEAD
) {
3351 g_assert_not_reached();
3353 la_reset_pref(&s
->temps
[i
]);
3357 /* liveness analysis: sync globals back to memory and kill. */
3358 static void la_global_kill(TCGContext
*s
, int ng
)
3362 for (i
= 0; i
< ng
; i
++) {
3363 s
->temps
[i
].state
= TS_DEAD
| TS_MEM
;
3364 la_reset_pref(&s
->temps
[i
]);
3368 /* liveness analysis: note live globals crossing calls. */
3369 static void la_cross_call(TCGContext
*s
, int nt
)
3371 TCGRegSet mask
= ~tcg_target_call_clobber_regs
;
3374 for (i
= 0; i
< nt
; i
++) {
3375 TCGTemp
*ts
= &s
->temps
[i
];
3376 if (!(ts
->state
& TS_DEAD
)) {
3377 TCGRegSet
*pset
= la_temp_pref(ts
);
3378 TCGRegSet set
= *pset
;
3381 /* If the combination is not possible, restart. */
3383 set
= tcg_target_available_regs
[ts
->type
] & mask
;
3391 * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
3392 * to TEMP_EBB, if possible.
3394 static void __attribute__((noinline
))
3395 liveness_pass_0(TCGContext
*s
)
3397 void * const multiple_ebb
= (void *)(uintptr_t)-1;
3398 int nb_temps
= s
->nb_temps
;
3401 for (int i
= s
->nb_globals
; i
< nb_temps
; ++i
) {
3402 s
->temps
[i
].state_ptr
= NULL
;
3406 * Represent each EBB by the op at which it begins. In the case of
3407 * the first EBB, this is the first op, otherwise it is a label.
3408 * Collect the uses of each TEMP_TB: NULL for unused, EBB for use
3409 * within a single EBB, else MULTIPLE_EBB.
3411 ebb
= QTAILQ_FIRST(&s
->ops
);
3412 QTAILQ_FOREACH(op
, &s
->ops
, link
) {
3413 const TCGOpDef
*def
;
3414 int nb_oargs
, nb_iargs
;
3417 case INDEX_op_set_label
:
3420 case INDEX_op_discard
:
3423 nb_oargs
= TCGOP_CALLO(op
);
3424 nb_iargs
= TCGOP_CALLI(op
);
3427 def
= &tcg_op_defs
[op
->opc
];
3428 nb_oargs
= def
->nb_oargs
;
3429 nb_iargs
= def
->nb_iargs
;
3433 for (int i
= 0; i
< nb_oargs
+ nb_iargs
; ++i
) {
3434 TCGTemp
*ts
= arg_temp(op
->args
[i
]);
3436 if (ts
->kind
!= TEMP_TB
) {
3439 if (ts
->state_ptr
== NULL
) {
3440 ts
->state_ptr
= ebb
;
3441 } else if (ts
->state_ptr
!= ebb
) {
3442 ts
->state_ptr
= multiple_ebb
;
3448 * For TEMP_TB that turned out not to be used beyond one EBB,
3449 * reduce the liveness to TEMP_EBB.
3451 for (int i
= s
->nb_globals
; i
< nb_temps
; ++i
) {
3452 TCGTemp
*ts
= &s
->temps
[i
];
3453 if (ts
->kind
== TEMP_TB
&& ts
->state_ptr
!= multiple_ebb
) {
3454 ts
->kind
= TEMP_EBB
;
3459 /* Liveness analysis : update the opc_arg_life array to tell if a
3460 given input arguments is dead. Instructions updating dead
3461 temporaries are removed. */
3462 static void __attribute__((noinline
))
3463 liveness_pass_1(TCGContext
*s
)
3465 int nb_globals
= s
->nb_globals
;
3466 int nb_temps
= s
->nb_temps
;
3467 TCGOp
*op
, *op_prev
;
3471 prefs
= tcg_malloc(sizeof(TCGRegSet
) * nb_temps
);
3472 for (i
= 0; i
< nb_temps
; ++i
) {
3473 s
->temps
[i
].state_ptr
= prefs
+ i
;
3476 /* ??? Should be redundant with the exit_tb that ends the TB. */
3477 la_func_end(s
, nb_globals
, nb_temps
);
3479 QTAILQ_FOREACH_REVERSE_SAFE(op
, &s
->ops
, link
, op_prev
) {
3480 int nb_iargs
, nb_oargs
;
3481 TCGOpcode opc_new
, opc_new2
;
3483 TCGLifeData arg_life
= 0;
3485 TCGOpcode opc
= op
->opc
;
3486 const TCGOpDef
*def
= &tcg_op_defs
[opc
];
3491 const TCGHelperInfo
*info
= tcg_call_info(op
);
3492 int call_flags
= tcg_call_flags(op
);
3494 nb_oargs
= TCGOP_CALLO(op
);
3495 nb_iargs
= TCGOP_CALLI(op
);
3497 /* pure functions can be removed if their result is unused */
3498 if (call_flags
& TCG_CALL_NO_SIDE_EFFECTS
) {
3499 for (i
= 0; i
< nb_oargs
; i
++) {
3500 ts
= arg_temp(op
->args
[i
]);
3501 if (ts
->state
!= TS_DEAD
) {
3502 goto do_not_remove_call
;
3509 /* Output args are dead. */
3510 for (i
= 0; i
< nb_oargs
; i
++) {
3511 ts
= arg_temp(op
->args
[i
]);
3512 if (ts
->state
& TS_DEAD
) {
3513 arg_life
|= DEAD_ARG
<< i
;
3515 if (ts
->state
& TS_MEM
) {
3516 arg_life
|= SYNC_ARG
<< i
;
3518 ts
->state
= TS_DEAD
;
3522 /* Not used -- it will be tcg_target_call_oarg_reg(). */
3523 memset(op
->output_pref
, 0, sizeof(op
->output_pref
));
3525 if (!(call_flags
& (TCG_CALL_NO_WRITE_GLOBALS
|
3526 TCG_CALL_NO_READ_GLOBALS
))) {
3527 la_global_kill(s
, nb_globals
);
3528 } else if (!(call_flags
& TCG_CALL_NO_READ_GLOBALS
)) {
3529 la_global_sync(s
, nb_globals
);
3532 /* Record arguments that die in this helper. */
3533 for (i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
3534 ts
= arg_temp(op
->args
[i
]);
3535 if (ts
->state
& TS_DEAD
) {
3536 arg_life
|= DEAD_ARG
<< i
;
3540 /* For all live registers, remove call-clobbered prefs. */
3541 la_cross_call(s
, nb_temps
);
3544 * Input arguments are live for preceding opcodes.
3546 * For those arguments that die, and will be allocated in
3547 * registers, clear the register set for that arg, to be
3548 * filled in below. For args that will be on the stack,
3549 * reset to any available reg. Process arguments in reverse
3550 * order so that if a temp is used more than once, the stack
3551 * reset to max happens before the register reset to 0.
3553 for (i
= nb_iargs
- 1; i
>= 0; i
--) {
3554 const TCGCallArgumentLoc
*loc
= &info
->in
[i
];
3555 ts
= arg_temp(op
->args
[nb_oargs
+ i
]);
3557 if (ts
->state
& TS_DEAD
) {
3558 switch (loc
->kind
) {
3559 case TCG_CALL_ARG_NORMAL
:
3560 case TCG_CALL_ARG_EXTEND_U
:
3561 case TCG_CALL_ARG_EXTEND_S
:
3562 if (arg_slot_reg_p(loc
->arg_slot
)) {
3563 *la_temp_pref(ts
) = 0;
3569 tcg_target_available_regs
[ts
->type
];
3572 ts
->state
&= ~TS_DEAD
;
3577 * For each input argument, add its input register to prefs.
3578 * If a temp is used once, this produces a single set bit;
3579 * if a temp is used multiple times, this produces a set.
3581 for (i
= 0; i
< nb_iargs
; i
++) {
3582 const TCGCallArgumentLoc
*loc
= &info
->in
[i
];
3583 ts
= arg_temp(op
->args
[nb_oargs
+ i
]);
3585 switch (loc
->kind
) {
3586 case TCG_CALL_ARG_NORMAL
:
3587 case TCG_CALL_ARG_EXTEND_U
:
3588 case TCG_CALL_ARG_EXTEND_S
:
3589 if (arg_slot_reg_p(loc
->arg_slot
)) {
3590 tcg_regset_set_reg(*la_temp_pref(ts
),
3591 tcg_target_call_iarg_regs
[loc
->arg_slot
]);
3600 case INDEX_op_insn_start
:
3602 case INDEX_op_discard
:
3603 /* mark the temporary as dead */
3604 ts
= arg_temp(op
->args
[0]);
3605 ts
->state
= TS_DEAD
;
3609 case INDEX_op_add2_i32
:
3610 opc_new
= INDEX_op_add_i32
;
3612 case INDEX_op_sub2_i32
:
3613 opc_new
= INDEX_op_sub_i32
;
3615 case INDEX_op_add2_i64
:
3616 opc_new
= INDEX_op_add_i64
;
3618 case INDEX_op_sub2_i64
:
3619 opc_new
= INDEX_op_sub_i64
;
3623 /* Test if the high part of the operation is dead, but not
3624 the low part. The result can be optimized to a simple
3625 add or sub. This happens often for x86_64 guest when the
3626 cpu mode is set to 32 bit. */
3627 if (arg_temp(op
->args
[1])->state
== TS_DEAD
) {
3628 if (arg_temp(op
->args
[0])->state
== TS_DEAD
) {
3631 /* Replace the opcode and adjust the args in place,
3632 leaving 3 unused args at the end. */
3633 op
->opc
= opc
= opc_new
;
3634 op
->args
[1] = op
->args
[2];
3635 op
->args
[2] = op
->args
[4];
3636 /* Fall through and mark the single-word operation live. */
3642 case INDEX_op_mulu2_i32
:
3643 opc_new
= INDEX_op_mul_i32
;
3644 opc_new2
= INDEX_op_muluh_i32
;
3645 have_opc_new2
= TCG_TARGET_HAS_muluh_i32
;
3647 case INDEX_op_muls2_i32
:
3648 opc_new
= INDEX_op_mul_i32
;
3649 opc_new2
= INDEX_op_mulsh_i32
;
3650 have_opc_new2
= TCG_TARGET_HAS_mulsh_i32
;
3652 case INDEX_op_mulu2_i64
:
3653 opc_new
= INDEX_op_mul_i64
;
3654 opc_new2
= INDEX_op_muluh_i64
;
3655 have_opc_new2
= TCG_TARGET_HAS_muluh_i64
;
3657 case INDEX_op_muls2_i64
:
3658 opc_new
= INDEX_op_mul_i64
;
3659 opc_new2
= INDEX_op_mulsh_i64
;
3660 have_opc_new2
= TCG_TARGET_HAS_mulsh_i64
;
3665 if (arg_temp(op
->args
[1])->state
== TS_DEAD
) {
3666 if (arg_temp(op
->args
[0])->state
== TS_DEAD
) {
3667 /* Both parts of the operation are dead. */
3670 /* The high part of the operation is dead; generate the low. */
3671 op
->opc
= opc
= opc_new
;
3672 op
->args
[1] = op
->args
[2];
3673 op
->args
[2] = op
->args
[3];
3674 } else if (arg_temp(op
->args
[0])->state
== TS_DEAD
&& have_opc_new2
) {
3675 /* The low part of the operation is dead; generate the high. */
3676 op
->opc
= opc
= opc_new2
;
3677 op
->args
[0] = op
->args
[1];
3678 op
->args
[1] = op
->args
[2];
3679 op
->args
[2] = op
->args
[3];
3683 /* Mark the single-word operation live. */
3688 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
3689 nb_iargs
= def
->nb_iargs
;
3690 nb_oargs
= def
->nb_oargs
;
3692 /* Test if the operation can be removed because all
3693 its outputs are dead. We assume that nb_oargs == 0
3694 implies side effects */
3695 if (!(def
->flags
& TCG_OPF_SIDE_EFFECTS
) && nb_oargs
!= 0) {
3696 for (i
= 0; i
< nb_oargs
; i
++) {
3697 if (arg_temp(op
->args
[i
])->state
!= TS_DEAD
) {
3706 tcg_op_remove(s
, op
);
3710 for (i
= 0; i
< nb_oargs
; i
++) {
3711 ts
= arg_temp(op
->args
[i
]);
3713 /* Remember the preference of the uses that followed. */
3714 if (i
< ARRAY_SIZE(op
->output_pref
)) {
3715 op
->output_pref
[i
] = *la_temp_pref(ts
);
3718 /* Output args are dead. */
3719 if (ts
->state
& TS_DEAD
) {
3720 arg_life
|= DEAD_ARG
<< i
;
3722 if (ts
->state
& TS_MEM
) {
3723 arg_life
|= SYNC_ARG
<< i
;
3725 ts
->state
= TS_DEAD
;
3729 /* If end of basic block, update. */
3730 if (def
->flags
& TCG_OPF_BB_EXIT
) {
3731 la_func_end(s
, nb_globals
, nb_temps
);
3732 } else if (def
->flags
& TCG_OPF_COND_BRANCH
) {
3733 la_bb_sync(s
, nb_globals
, nb_temps
);
3734 } else if (def
->flags
& TCG_OPF_BB_END
) {
3735 la_bb_end(s
, nb_globals
, nb_temps
);
3736 } else if (def
->flags
& TCG_OPF_SIDE_EFFECTS
) {
3737 la_global_sync(s
, nb_globals
);
3738 if (def
->flags
& TCG_OPF_CALL_CLOBBER
) {
3739 la_cross_call(s
, nb_temps
);
3743 /* Record arguments that die in this opcode. */
3744 for (i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
3745 ts
= arg_temp(op
->args
[i
]);
3746 if (ts
->state
& TS_DEAD
) {
3747 arg_life
|= DEAD_ARG
<< i
;
3751 /* Input arguments are live for preceding opcodes. */
3752 for (i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
3753 ts
= arg_temp(op
->args
[i
]);
3754 if (ts
->state
& TS_DEAD
) {
3755 /* For operands that were dead, initially allow
3756 all regs for the type. */
3757 *la_temp_pref(ts
) = tcg_target_available_regs
[ts
->type
];
3758 ts
->state
&= ~TS_DEAD
;
3762 /* Incorporate constraints for this operand. */
3764 case INDEX_op_mov_i32
:
3765 case INDEX_op_mov_i64
:
3766 /* Note that these are TCG_OPF_NOT_PRESENT and do not
3767 have proper constraints. That said, special case
3768 moves to propagate preferences backward. */
3769 if (IS_DEAD_ARG(1)) {
3770 *la_temp_pref(arg_temp(op
->args
[0]))
3771 = *la_temp_pref(arg_temp(op
->args
[1]));
3776 for (i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
3777 const TCGArgConstraint
*ct
= &def
->args_ct
[i
];
3778 TCGRegSet set
, *pset
;
3780 ts
= arg_temp(op
->args
[i
]);
3781 pset
= la_temp_pref(ts
);
3786 set
&= output_pref(op
, ct
->alias_index
);
3788 /* If the combination is not possible, restart. */
3798 op
->life
= arg_life
;
3802 /* Liveness analysis: Convert indirect regs to direct temporaries. */
3803 static bool __attribute__((noinline
))
3804 liveness_pass_2(TCGContext
*s
)
3806 int nb_globals
= s
->nb_globals
;
3808 bool changes
= false;
3809 TCGOp
*op
, *op_next
;
3811 /* Create a temporary for each indirect global. */
3812 for (i
= 0; i
< nb_globals
; ++i
) {
3813 TCGTemp
*its
= &s
->temps
[i
];
3814 if (its
->indirect_reg
) {
3815 TCGTemp
*dts
= tcg_temp_alloc(s
);
3816 dts
->type
= its
->type
;
3817 dts
->base_type
= its
->base_type
;
3818 dts
->temp_subindex
= its
->temp_subindex
;
3819 dts
->kind
= TEMP_EBB
;
3820 its
->state_ptr
= dts
;
3822 its
->state_ptr
= NULL
;
3824 /* All globals begin dead. */
3825 its
->state
= TS_DEAD
;
3827 for (nb_temps
= s
->nb_temps
; i
< nb_temps
; ++i
) {
3828 TCGTemp
*its
= &s
->temps
[i
];
3829 its
->state_ptr
= NULL
;
3830 its
->state
= TS_DEAD
;
3833 QTAILQ_FOREACH_SAFE(op
, &s
->ops
, link
, op_next
) {
3834 TCGOpcode opc
= op
->opc
;
3835 const TCGOpDef
*def
= &tcg_op_defs
[opc
];
3836 TCGLifeData arg_life
= op
->life
;
3837 int nb_iargs
, nb_oargs
, call_flags
;
3838 TCGTemp
*arg_ts
, *dir_ts
;
3840 if (opc
== INDEX_op_call
) {
3841 nb_oargs
= TCGOP_CALLO(op
);
3842 nb_iargs
= TCGOP_CALLI(op
);
3843 call_flags
= tcg_call_flags(op
);
3845 nb_iargs
= def
->nb_iargs
;
3846 nb_oargs
= def
->nb_oargs
;
3848 /* Set flags similar to how calls require. */
3849 if (def
->flags
& TCG_OPF_COND_BRANCH
) {
3850 /* Like reading globals: sync_globals */
3851 call_flags
= TCG_CALL_NO_WRITE_GLOBALS
;
3852 } else if (def
->flags
& TCG_OPF_BB_END
) {
3853 /* Like writing globals: save_globals */
3855 } else if (def
->flags
& TCG_OPF_SIDE_EFFECTS
) {
3856 /* Like reading globals: sync_globals */
3857 call_flags
= TCG_CALL_NO_WRITE_GLOBALS
;
3859 /* No effect on globals. */
3860 call_flags
= (TCG_CALL_NO_READ_GLOBALS
|
3861 TCG_CALL_NO_WRITE_GLOBALS
);
3865 /* Make sure that input arguments are available. */
3866 for (i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
3867 arg_ts
= arg_temp(op
->args
[i
]);
3868 dir_ts
= arg_ts
->state_ptr
;
3869 if (dir_ts
&& arg_ts
->state
== TS_DEAD
) {
3870 TCGOpcode lopc
= (arg_ts
->type
== TCG_TYPE_I32
3873 TCGOp
*lop
= tcg_op_insert_before(s
, op
, lopc
, 3);
3875 lop
->args
[0] = temp_arg(dir_ts
);
3876 lop
->args
[1] = temp_arg(arg_ts
->mem_base
);
3877 lop
->args
[2] = arg_ts
->mem_offset
;
3879 /* Loaded, but synced with memory. */
3880 arg_ts
->state
= TS_MEM
;
3884 /* Perform input replacement, and mark inputs that became dead.
3885 No action is required except keeping temp_state up to date
3886 so that we reload when needed. */
3887 for (i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
3888 arg_ts
= arg_temp(op
->args
[i
]);
3889 dir_ts
= arg_ts
->state_ptr
;
3891 op
->args
[i
] = temp_arg(dir_ts
);
3893 if (IS_DEAD_ARG(i
)) {
3894 arg_ts
->state
= TS_DEAD
;
3899 /* Liveness analysis should ensure that the following are
3900 all correct, for call sites and basic block end points. */
3901 if (call_flags
& TCG_CALL_NO_READ_GLOBALS
) {
3903 } else if (call_flags
& TCG_CALL_NO_WRITE_GLOBALS
) {
3904 for (i
= 0; i
< nb_globals
; ++i
) {
3905 /* Liveness should see that globals are synced back,
3906 that is, either TS_DEAD or TS_MEM. */
3907 arg_ts
= &s
->temps
[i
];
3908 tcg_debug_assert(arg_ts
->state_ptr
== 0
3909 || arg_ts
->state
!= 0);
3912 for (i
= 0; i
< nb_globals
; ++i
) {
3913 /* Liveness should see that globals are saved back,
3914 that is, TS_DEAD, waiting to be reloaded. */
3915 arg_ts
= &s
->temps
[i
];
3916 tcg_debug_assert(arg_ts
->state_ptr
== 0
3917 || arg_ts
->state
== TS_DEAD
);
3921 /* Outputs become available. */
3922 if (opc
== INDEX_op_mov_i32
|| opc
== INDEX_op_mov_i64
) {
3923 arg_ts
= arg_temp(op
->args
[0]);
3924 dir_ts
= arg_ts
->state_ptr
;
3926 op
->args
[0] = temp_arg(dir_ts
);
3929 /* The output is now live and modified. */
3932 if (NEED_SYNC_ARG(0)) {
3933 TCGOpcode sopc
= (arg_ts
->type
== TCG_TYPE_I32
3936 TCGOp
*sop
= tcg_op_insert_after(s
, op
, sopc
, 3);
3937 TCGTemp
*out_ts
= dir_ts
;
3939 if (IS_DEAD_ARG(0)) {
3940 out_ts
= arg_temp(op
->args
[1]);
3941 arg_ts
->state
= TS_DEAD
;
3942 tcg_op_remove(s
, op
);
3944 arg_ts
->state
= TS_MEM
;
3947 sop
->args
[0] = temp_arg(out_ts
);
3948 sop
->args
[1] = temp_arg(arg_ts
->mem_base
);
3949 sop
->args
[2] = arg_ts
->mem_offset
;
3951 tcg_debug_assert(!IS_DEAD_ARG(0));
3955 for (i
= 0; i
< nb_oargs
; i
++) {
3956 arg_ts
= arg_temp(op
->args
[i
]);
3957 dir_ts
= arg_ts
->state_ptr
;
3961 op
->args
[i
] = temp_arg(dir_ts
);
3964 /* The output is now live and modified. */
3967 /* Sync outputs upon their last write. */
3968 if (NEED_SYNC_ARG(i
)) {
3969 TCGOpcode sopc
= (arg_ts
->type
== TCG_TYPE_I32
3972 TCGOp
*sop
= tcg_op_insert_after(s
, op
, sopc
, 3);
3974 sop
->args
[0] = temp_arg(dir_ts
);
3975 sop
->args
[1] = temp_arg(arg_ts
->mem_base
);
3976 sop
->args
[2] = arg_ts
->mem_offset
;
3978 arg_ts
->state
= TS_MEM
;
3980 /* Drop outputs that are dead. */
3981 if (IS_DEAD_ARG(i
)) {
3982 arg_ts
->state
= TS_DEAD
;
3991 static void temp_allocate_frame(TCGContext
*s
, TCGTemp
*ts
)
3996 /* When allocating an object, look at the full type. */
3997 size
= tcg_type_size(ts
->base_type
);
3998 switch (ts
->base_type
) {
4010 * Note that we do not require aligned storage for V256,
4011 * and that we provide alignment for I128 to match V128,
4012 * even if that's above what the host ABI requires.
4017 g_assert_not_reached();
4021 * Assume the stack is sufficiently aligned.
4022 * This affects e.g. ARM NEON, where we have 8 byte stack alignment
4023 * and do not require 16 byte vector alignment. This seems slightly
4024 * easier than fully parameterizing the above switch statement.
4026 align
= MIN(TCG_TARGET_STACK_ALIGN
, align
);
4027 off
= ROUND_UP(s
->current_frame_offset
, align
);
4029 /* If we've exhausted the stack frame, restart with a smaller TB. */
4030 if (off
+ size
> s
->frame_end
) {
4031 tcg_raise_tb_overflow(s
);
4033 s
->current_frame_offset
= off
+ size
;
4034 #if defined(__sparc__)
4035 off
+= TCG_TARGET_STACK_BIAS
;
4038 /* If the object was subdivided, assign memory to all the parts. */
4039 if (ts
->base_type
!= ts
->type
) {
4040 int part_size
= tcg_type_size(ts
->type
);
4041 int part_count
= size
/ part_size
;
4044 * Each part is allocated sequentially in tcg_temp_new_internal.
4045 * Jump back to the first part by subtracting the current index.
4047 ts
-= ts
->temp_subindex
;
4048 for (int i
= 0; i
< part_count
; ++i
) {
4049 ts
[i
].mem_offset
= off
+ i
* part_size
;
4050 ts
[i
].mem_base
= s
->frame_temp
;
4051 ts
[i
].mem_allocated
= 1;
4054 ts
->mem_offset
= off
;
4055 ts
->mem_base
= s
->frame_temp
;
4056 ts
->mem_allocated
= 1;
4060 /* Assign @reg to @ts, and update reg_to_temp[]. */
4061 static void set_temp_val_reg(TCGContext
*s
, TCGTemp
*ts
, TCGReg reg
)
4063 if (ts
->val_type
== TEMP_VAL_REG
) {
4064 TCGReg old
= ts
->reg
;
4065 tcg_debug_assert(s
->reg_to_temp
[old
] == ts
);
4069 s
->reg_to_temp
[old
] = NULL
;
4071 tcg_debug_assert(s
->reg_to_temp
[reg
] == NULL
);
4072 s
->reg_to_temp
[reg
] = ts
;
4073 ts
->val_type
= TEMP_VAL_REG
;
4077 /* Assign a non-register value type to @ts, and update reg_to_temp[]. */
4078 static void set_temp_val_nonreg(TCGContext
*s
, TCGTemp
*ts
, TCGTempVal type
)
4080 tcg_debug_assert(type
!= TEMP_VAL_REG
);
4081 if (ts
->val_type
== TEMP_VAL_REG
) {
4082 TCGReg reg
= ts
->reg
;
4083 tcg_debug_assert(s
->reg_to_temp
[reg
] == ts
);
4084 s
->reg_to_temp
[reg
] = NULL
;
4086 ts
->val_type
= type
;
4089 static void temp_load(TCGContext
*, TCGTemp
*, TCGRegSet
, TCGRegSet
, TCGRegSet
);
4091 /* Mark a temporary as free or dead. If 'free_or_dead' is negative,
4092 mark it free; otherwise mark it dead. */
4093 static void temp_free_or_dead(TCGContext
*s
, TCGTemp
*ts
, int free_or_dead
)
4095 TCGTempVal new_type
;
4102 new_type
= TEMP_VAL_MEM
;
4105 new_type
= free_or_dead
< 0 ? TEMP_VAL_MEM
: TEMP_VAL_DEAD
;
4108 new_type
= TEMP_VAL_CONST
;
4111 g_assert_not_reached();
4113 set_temp_val_nonreg(s
, ts
, new_type
);
4116 /* Mark a temporary as dead. */
4117 static inline void temp_dead(TCGContext
*s
, TCGTemp
*ts
)
4119 temp_free_or_dead(s
, ts
, 1);
4122 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
4123 registers needs to be allocated to store a constant. If 'free_or_dead'
4124 is non-zero, subsequently release the temporary; if it is positive, the
4125 temp is dead; if it is negative, the temp is free. */
4126 static void temp_sync(TCGContext
*s
, TCGTemp
*ts
, TCGRegSet allocated_regs
,
4127 TCGRegSet preferred_regs
, int free_or_dead
)
4129 if (!temp_readonly(ts
) && !ts
->mem_coherent
) {
4130 if (!ts
->mem_allocated
) {
4131 temp_allocate_frame(s
, ts
);
4133 switch (ts
->val_type
) {
4134 case TEMP_VAL_CONST
:
4135 /* If we're going to free the temp immediately, then we won't
4136 require it later in a register, so attempt to store the
4137 constant to memory directly. */
4139 && tcg_out_sti(s
, ts
->type
, ts
->val
,
4140 ts
->mem_base
->reg
, ts
->mem_offset
)) {
4143 temp_load(s
, ts
, tcg_target_available_regs
[ts
->type
],
4144 allocated_regs
, preferred_regs
);
4148 tcg_out_st(s
, ts
->type
, ts
->reg
,
4149 ts
->mem_base
->reg
, ts
->mem_offset
);
4157 g_assert_not_reached();
4159 ts
->mem_coherent
= 1;
4162 temp_free_or_dead(s
, ts
, free_or_dead
);
4166 /* free register 'reg' by spilling the corresponding temporary if necessary */
4167 static void tcg_reg_free(TCGContext
*s
, TCGReg reg
, TCGRegSet allocated_regs
)
4169 TCGTemp
*ts
= s
->reg_to_temp
[reg
];
4171 temp_sync(s
, ts
, allocated_regs
, 0, -1);
4177 * @required_regs: Set of registers in which we must allocate.
4178 * @allocated_regs: Set of registers which must be avoided.
4179 * @preferred_regs: Set of registers we should prefer.
4180 * @rev: True if we search the registers in "indirect" order.
4182 * The allocated register must be in @required_regs & ~@allocated_regs,
4183 * but if we can put it in @preferred_regs we may save a move later.
4185 static TCGReg
tcg_reg_alloc(TCGContext
*s
, TCGRegSet required_regs
,
4186 TCGRegSet allocated_regs
,
4187 TCGRegSet preferred_regs
, bool rev
)
4189 int i
, j
, f
, n
= ARRAY_SIZE(tcg_target_reg_alloc_order
);
4190 TCGRegSet reg_ct
[2];
4193 reg_ct
[1] = required_regs
& ~allocated_regs
;
4194 tcg_debug_assert(reg_ct
[1] != 0);
4195 reg_ct
[0] = reg_ct
[1] & preferred_regs
;
4197 /* Skip the preferred_regs option if it cannot be satisfied,
4198 or if the preference made no difference. */
4199 f
= reg_ct
[0] == 0 || reg_ct
[0] == reg_ct
[1];
4201 order
= rev
? indirect_reg_alloc_order
: tcg_target_reg_alloc_order
;
4203 /* Try free registers, preferences first. */
4204 for (j
= f
; j
< 2; j
++) {
4205 TCGRegSet set
= reg_ct
[j
];
4207 if (tcg_regset_single(set
)) {
4208 /* One register in the set. */
4209 TCGReg reg
= tcg_regset_first(set
);
4210 if (s
->reg_to_temp
[reg
] == NULL
) {
4214 for (i
= 0; i
< n
; i
++) {
4215 TCGReg reg
= order
[i
];
4216 if (s
->reg_to_temp
[reg
] == NULL
&&
4217 tcg_regset_test_reg(set
, reg
)) {
4224 /* We must spill something. */
4225 for (j
= f
; j
< 2; j
++) {
4226 TCGRegSet set
= reg_ct
[j
];
4228 if (tcg_regset_single(set
)) {
4229 /* One register in the set. */
4230 TCGReg reg
= tcg_regset_first(set
);
4231 tcg_reg_free(s
, reg
, allocated_regs
);
4234 for (i
= 0; i
< n
; i
++) {
4235 TCGReg reg
= order
[i
];
4236 if (tcg_regset_test_reg(set
, reg
)) {
4237 tcg_reg_free(s
, reg
, allocated_regs
);
4244 g_assert_not_reached();
4247 static TCGReg
tcg_reg_alloc_pair(TCGContext
*s
, TCGRegSet required_regs
,
4248 TCGRegSet allocated_regs
,
4249 TCGRegSet preferred_regs
, bool rev
)
4251 int i
, j
, k
, fmin
, n
= ARRAY_SIZE(tcg_target_reg_alloc_order
);
4252 TCGRegSet reg_ct
[2];
4255 /* Ensure that if I is not in allocated_regs, I+1 is not either. */
4256 reg_ct
[1] = required_regs
& ~(allocated_regs
| (allocated_regs
>> 1));
4257 tcg_debug_assert(reg_ct
[1] != 0);
4258 reg_ct
[0] = reg_ct
[1] & preferred_regs
;
4260 order
= rev
? indirect_reg_alloc_order
: tcg_target_reg_alloc_order
;
4263 * Skip the preferred_regs option if it cannot be satisfied,
4264 * or if the preference made no difference.
4266 k
= reg_ct
[0] == 0 || reg_ct
[0] == reg_ct
[1];
4269 * Minimize the number of flushes by looking for 2 free registers first,
4270 * then a single flush, then two flushes.
4272 for (fmin
= 2; fmin
>= 0; fmin
--) {
4273 for (j
= k
; j
< 2; j
++) {
4274 TCGRegSet set
= reg_ct
[j
];
4276 for (i
= 0; i
< n
; i
++) {
4277 TCGReg reg
= order
[i
];
4279 if (tcg_regset_test_reg(set
, reg
)) {
4280 int f
= !s
->reg_to_temp
[reg
] + !s
->reg_to_temp
[reg
+ 1];
4282 tcg_reg_free(s
, reg
, allocated_regs
);
4283 tcg_reg_free(s
, reg
+ 1, allocated_regs
);
4290 g_assert_not_reached();
4293 /* Make sure the temporary is in a register. If needed, allocate the register
4294 from DESIRED while avoiding ALLOCATED. */
4295 static void temp_load(TCGContext
*s
, TCGTemp
*ts
, TCGRegSet desired_regs
,
4296 TCGRegSet allocated_regs
, TCGRegSet preferred_regs
)
4300 switch (ts
->val_type
) {
4303 case TEMP_VAL_CONST
:
4304 reg
= tcg_reg_alloc(s
, desired_regs
, allocated_regs
,
4305 preferred_regs
, ts
->indirect_base
);
4306 if (ts
->type
<= TCG_TYPE_I64
) {
4307 tcg_out_movi(s
, ts
->type
, reg
, ts
->val
);
4309 uint64_t val
= ts
->val
;
4313 * Find the minimal vector element that matches the constant.
4314 * The targets will, in general, have to do this search anyway,
4315 * do this generically.
4317 if (val
== dup_const(MO_8
, val
)) {
4319 } else if (val
== dup_const(MO_16
, val
)) {
4321 } else if (val
== dup_const(MO_32
, val
)) {
4325 tcg_out_dupi_vec(s
, ts
->type
, vece
, reg
, ts
->val
);
4327 ts
->mem_coherent
= 0;
4330 reg
= tcg_reg_alloc(s
, desired_regs
, allocated_regs
,
4331 preferred_regs
, ts
->indirect_base
);
4332 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_base
->reg
, ts
->mem_offset
);
4333 ts
->mem_coherent
= 1;
4337 g_assert_not_reached();
4339 set_temp_val_reg(s
, ts
, reg
);
4342 /* Save a temporary to memory. 'allocated_regs' is used in case a
4343 temporary registers needs to be allocated to store a constant. */
4344 static void temp_save(TCGContext
*s
, TCGTemp
*ts
, TCGRegSet allocated_regs
)
4346 /* The liveness analysis already ensures that globals are back
4347 in memory. Keep an tcg_debug_assert for safety. */
4348 tcg_debug_assert(ts
->val_type
== TEMP_VAL_MEM
|| temp_readonly(ts
));
4351 /* save globals to their canonical location and assume they can be
4352 modified be the following code. 'allocated_regs' is used in case a
4353 temporary registers needs to be allocated to store a constant. */
4354 static void save_globals(TCGContext
*s
, TCGRegSet allocated_regs
)
4358 for (i
= 0, n
= s
->nb_globals
; i
< n
; i
++) {
4359 temp_save(s
, &s
->temps
[i
], allocated_regs
);
4363 /* sync globals to their canonical location and assume they can be
4364 read by the following code. 'allocated_regs' is used in case a
4365 temporary registers needs to be allocated to store a constant. */
4366 static void sync_globals(TCGContext
*s
, TCGRegSet allocated_regs
)
4370 for (i
= 0, n
= s
->nb_globals
; i
< n
; i
++) {
4371 TCGTemp
*ts
= &s
->temps
[i
];
4372 tcg_debug_assert(ts
->val_type
!= TEMP_VAL_REG
4373 || ts
->kind
== TEMP_FIXED
4374 || ts
->mem_coherent
);
4378 /* at the end of a basic block, we assume all temporaries are dead and
4379 all globals are stored at their canonical location. */
4380 static void tcg_reg_alloc_bb_end(TCGContext
*s
, TCGRegSet allocated_regs
)
4384 for (i
= s
->nb_globals
; i
< s
->nb_temps
; i
++) {
4385 TCGTemp
*ts
= &s
->temps
[i
];
4389 temp_save(s
, ts
, allocated_regs
);
4392 /* The liveness analysis already ensures that temps are dead.
4393 Keep an tcg_debug_assert for safety. */
4394 tcg_debug_assert(ts
->val_type
== TEMP_VAL_DEAD
);
4397 /* Similarly, we should have freed any allocated register. */
4398 tcg_debug_assert(ts
->val_type
== TEMP_VAL_CONST
);
4401 g_assert_not_reached();
4405 save_globals(s
, allocated_regs
);
4409 * At a conditional branch, we assume all temporaries are dead unless
4410 * explicitly live-across-conditional-branch; all globals and local
4411 * temps are synced to their location.
4413 static void tcg_reg_alloc_cbranch(TCGContext
*s
, TCGRegSet allocated_regs
)
4415 sync_globals(s
, allocated_regs
);
4417 for (int i
= s
->nb_globals
; i
< s
->nb_temps
; i
++) {
4418 TCGTemp
*ts
= &s
->temps
[i
];
4420 * The liveness analysis already ensures that temps are dead.
4421 * Keep tcg_debug_asserts for safety.
4425 tcg_debug_assert(ts
->val_type
!= TEMP_VAL_REG
|| ts
->mem_coherent
);
4431 g_assert_not_reached();
4437 * Specialized code generation for INDEX_op_mov_* with a constant.
4439 static void tcg_reg_alloc_do_movi(TCGContext
*s
, TCGTemp
*ots
,
4440 tcg_target_ulong val
, TCGLifeData arg_life
,
4441 TCGRegSet preferred_regs
)
4443 /* ENV should not be modified. */
4444 tcg_debug_assert(!temp_readonly(ots
));
4446 /* The movi is not explicitly generated here. */
4447 set_temp_val_nonreg(s
, ots
, TEMP_VAL_CONST
);
4449 ots
->mem_coherent
= 0;
4450 if (NEED_SYNC_ARG(0)) {
4451 temp_sync(s
, ots
, s
->reserved_regs
, preferred_regs
, IS_DEAD_ARG(0));
4452 } else if (IS_DEAD_ARG(0)) {
4458 * Specialized code generation for INDEX_op_mov_*.
4460 static void tcg_reg_alloc_mov(TCGContext
*s
, const TCGOp
*op
)
4462 const TCGLifeData arg_life
= op
->life
;
4463 TCGRegSet allocated_regs
, preferred_regs
;
4465 TCGType otype
, itype
;
4468 allocated_regs
= s
->reserved_regs
;
4469 preferred_regs
= output_pref(op
, 0);
4470 ots
= arg_temp(op
->args
[0]);
4471 ts
= arg_temp(op
->args
[1]);
4473 /* ENV should not be modified. */
4474 tcg_debug_assert(!temp_readonly(ots
));
4476 /* Note that otype != itype for no-op truncation. */
4480 if (ts
->val_type
== TEMP_VAL_CONST
) {
4481 /* propagate constant or generate sti */
4482 tcg_target_ulong val
= ts
->val
;
4483 if (IS_DEAD_ARG(1)) {
4486 tcg_reg_alloc_do_movi(s
, ots
, val
, arg_life
, preferred_regs
);
4490 /* If the source value is in memory we're going to be forced
4491 to have it in a register in order to perform the copy. Copy
4492 the SOURCE value into its own register first, that way we
4493 don't have to reload SOURCE the next time it is used. */
4494 if (ts
->val_type
== TEMP_VAL_MEM
) {
4495 temp_load(s
, ts
, tcg_target_available_regs
[itype
],
4496 allocated_regs
, preferred_regs
);
4498 tcg_debug_assert(ts
->val_type
== TEMP_VAL_REG
);
4501 if (IS_DEAD_ARG(0)) {
4502 /* mov to a non-saved dead register makes no sense (even with
4503 liveness analysis disabled). */
4504 tcg_debug_assert(NEED_SYNC_ARG(0));
4505 if (!ots
->mem_allocated
) {
4506 temp_allocate_frame(s
, ots
);
4508 tcg_out_st(s
, otype
, ireg
, ots
->mem_base
->reg
, ots
->mem_offset
);
4509 if (IS_DEAD_ARG(1)) {
4516 if (IS_DEAD_ARG(1) && ts
->kind
!= TEMP_FIXED
) {
4518 * The mov can be suppressed. Kill input first, so that it
4519 * is unlinked from reg_to_temp, then set the output to the
4520 * reg that we saved from the input.
4525 if (ots
->val_type
== TEMP_VAL_REG
) {
4528 /* Make sure to not spill the input register during allocation. */
4529 oreg
= tcg_reg_alloc(s
, tcg_target_available_regs
[otype
],
4530 allocated_regs
| ((TCGRegSet
)1 << ireg
),
4531 preferred_regs
, ots
->indirect_base
);
4533 if (!tcg_out_mov(s
, otype
, oreg
, ireg
)) {
4535 * Cross register class move not supported.
4536 * Store the source register into the destination slot
4537 * and leave the destination temp as TEMP_VAL_MEM.
4539 assert(!temp_readonly(ots
));
4540 if (!ts
->mem_allocated
) {
4541 temp_allocate_frame(s
, ots
);
4543 tcg_out_st(s
, ts
->type
, ireg
, ots
->mem_base
->reg
, ots
->mem_offset
);
4544 set_temp_val_nonreg(s
, ts
, TEMP_VAL_MEM
);
4545 ots
->mem_coherent
= 1;
4549 set_temp_val_reg(s
, ots
, oreg
);
4550 ots
->mem_coherent
= 0;
4552 if (NEED_SYNC_ARG(0)) {
4553 temp_sync(s
, ots
, allocated_regs
, 0, 0);
4558 * Specialized code generation for INDEX_op_dup_vec.
4560 static void tcg_reg_alloc_dup(TCGContext
*s
, const TCGOp
*op
)
4562 const TCGLifeData arg_life
= op
->life
;
4563 TCGRegSet dup_out_regs
, dup_in_regs
;
4565 TCGType itype
, vtype
;
4570 ots
= arg_temp(op
->args
[0]);
4571 its
= arg_temp(op
->args
[1]);
4573 /* ENV should not be modified. */
4574 tcg_debug_assert(!temp_readonly(ots
));
4577 vece
= TCGOP_VECE(op
);
4578 vtype
= TCGOP_VECL(op
) + TCG_TYPE_V64
;
4580 if (its
->val_type
== TEMP_VAL_CONST
) {
4581 /* Propagate constant via movi -> dupi. */
4582 tcg_target_ulong val
= its
->val
;
4583 if (IS_DEAD_ARG(1)) {
4586 tcg_reg_alloc_do_movi(s
, ots
, val
, arg_life
, output_pref(op
, 0));
4590 dup_out_regs
= tcg_op_defs
[INDEX_op_dup_vec
].args_ct
[0].regs
;
4591 dup_in_regs
= tcg_op_defs
[INDEX_op_dup_vec
].args_ct
[1].regs
;
4593 /* Allocate the output register now. */
4594 if (ots
->val_type
!= TEMP_VAL_REG
) {
4595 TCGRegSet allocated_regs
= s
->reserved_regs
;
4598 if (!IS_DEAD_ARG(1) && its
->val_type
== TEMP_VAL_REG
) {
4599 /* Make sure to not spill the input register. */
4600 tcg_regset_set_reg(allocated_regs
, its
->reg
);
4602 oreg
= tcg_reg_alloc(s
, dup_out_regs
, allocated_regs
,
4603 output_pref(op
, 0), ots
->indirect_base
);
4604 set_temp_val_reg(s
, ots
, oreg
);
4607 switch (its
->val_type
) {
4610 * The dup constriaints must be broad, covering all possible VECE.
4611 * However, tcg_op_dup_vec() gets to see the VECE and we allow it
4612 * to fail, indicating that extra moves are required for that case.
4614 if (tcg_regset_test_reg(dup_in_regs
, its
->reg
)) {
4615 if (tcg_out_dup_vec(s
, vtype
, vece
, ots
->reg
, its
->reg
)) {
4618 /* Try again from memory or a vector input register. */
4620 if (!its
->mem_coherent
) {
4622 * The input register is not synced, and so an extra store
4623 * would be required to use memory. Attempt an integer-vector
4624 * register move first. We do not have a TCGRegSet for this.
4626 if (tcg_out_mov(s
, itype
, ots
->reg
, its
->reg
)) {
4629 /* Sync the temp back to its slot and load from there. */
4630 temp_sync(s
, its
, s
->reserved_regs
, 0, 0);
4636 if (HOST_BIG_ENDIAN
) {
4637 lowpart_ofs
= tcg_type_size(itype
) - (1 << vece
);
4639 if (tcg_out_dupm_vec(s
, vtype
, vece
, ots
->reg
, its
->mem_base
->reg
,
4640 its
->mem_offset
+ lowpart_ofs
)) {
4643 /* Load the input into the destination vector register. */
4644 tcg_out_ld(s
, itype
, ots
->reg
, its
->mem_base
->reg
, its
->mem_offset
);
4648 g_assert_not_reached();
4651 /* We now have a vector input register, so dup must succeed. */
4652 ok
= tcg_out_dup_vec(s
, vtype
, vece
, ots
->reg
, ots
->reg
);
4653 tcg_debug_assert(ok
);
4656 ots
->mem_coherent
= 0;
4657 if (IS_DEAD_ARG(1)) {
4660 if (NEED_SYNC_ARG(0)) {
4661 temp_sync(s
, ots
, s
->reserved_regs
, 0, 0);
4663 if (IS_DEAD_ARG(0)) {
4668 static void tcg_reg_alloc_op(TCGContext
*s
, const TCGOp
*op
)
4670 const TCGLifeData arg_life
= op
->life
;
4671 const TCGOpDef
* const def
= &tcg_op_defs
[op
->opc
];
4672 TCGRegSet i_allocated_regs
;
4673 TCGRegSet o_allocated_regs
;
4674 int i
, k
, nb_iargs
, nb_oargs
;
4677 const TCGArgConstraint
*arg_ct
;
4679 TCGArg new_args
[TCG_MAX_OP_ARGS
];
4680 int const_args
[TCG_MAX_OP_ARGS
];
4682 nb_oargs
= def
->nb_oargs
;
4683 nb_iargs
= def
->nb_iargs
;
4685 /* copy constants */
4686 memcpy(new_args
+ nb_oargs
+ nb_iargs
,
4687 op
->args
+ nb_oargs
+ nb_iargs
,
4688 sizeof(TCGArg
) * def
->nb_cargs
);
4690 i_allocated_regs
= s
->reserved_regs
;
4691 o_allocated_regs
= s
->reserved_regs
;
4693 /* satisfy input constraints */
4694 for (k
= 0; k
< nb_iargs
; k
++) {
4695 TCGRegSet i_preferred_regs
, i_required_regs
;
4696 bool allocate_new_reg
, copyto_new_reg
;
4700 i
= def
->args_ct
[nb_oargs
+ k
].sort_index
;
4702 arg_ct
= &def
->args_ct
[i
];
4705 if (ts
->val_type
== TEMP_VAL_CONST
4706 && tcg_target_const_match(ts
->val
, ts
->type
, arg_ct
->ct
, TCGOP_VECE(op
))) {
4707 /* constant is OK for instruction */
4709 new_args
[i
] = ts
->val
;
4714 i_preferred_regs
= 0;
4715 i_required_regs
= arg_ct
->regs
;
4716 allocate_new_reg
= false;
4717 copyto_new_reg
= false;
4719 switch (arg_ct
->pair
) {
4720 case 0: /* not paired */
4721 if (arg_ct
->ialias
) {
4722 i_preferred_regs
= output_pref(op
, arg_ct
->alias_index
);
4725 * If the input is readonly, then it cannot also be an
4726 * output and aliased to itself. If the input is not
4727 * dead after the instruction, we must allocate a new
4728 * register and move it.
4730 if (temp_readonly(ts
) || !IS_DEAD_ARG(i
)
4731 || def
->args_ct
[arg_ct
->alias_index
].newreg
) {
4732 allocate_new_reg
= true;
4733 } else if (ts
->val_type
== TEMP_VAL_REG
) {
4735 * Check if the current register has already been
4736 * allocated for another input.
4739 tcg_regset_test_reg(i_allocated_regs
, reg
);
4742 if (!allocate_new_reg
) {
4743 temp_load(s
, ts
, i_required_regs
, i_allocated_regs
,
4746 allocate_new_reg
= !tcg_regset_test_reg(i_required_regs
, reg
);
4748 if (allocate_new_reg
) {
4750 * Allocate a new register matching the constraint
4751 * and move the temporary register into it.
4753 temp_load(s
, ts
, tcg_target_available_regs
[ts
->type
],
4754 i_allocated_regs
, 0);
4755 reg
= tcg_reg_alloc(s
, i_required_regs
, i_allocated_regs
,
4756 i_preferred_regs
, ts
->indirect_base
);
4757 copyto_new_reg
= true;
4762 /* First of an input pair; if i1 == i2, the second is an output. */
4764 i2
= arg_ct
->pair_index
;
4765 ts2
= i1
!= i2
? arg_temp(op
->args
[i2
]) : NULL
;
4768 * It is easier to default to allocating a new pair
4769 * and to identify a few cases where it's not required.
4771 if (arg_ct
->ialias
) {
4772 i_preferred_regs
= output_pref(op
, arg_ct
->alias_index
);
4773 if (IS_DEAD_ARG(i1
) &&
4775 !temp_readonly(ts
) &&
4776 ts
->val_type
== TEMP_VAL_REG
&&
4777 ts
->reg
< TCG_TARGET_NB_REGS
- 1 &&
4778 tcg_regset_test_reg(i_required_regs
, reg
) &&
4779 !tcg_regset_test_reg(i_allocated_regs
, reg
) &&
4780 !tcg_regset_test_reg(i_allocated_regs
, reg
+ 1) &&
4782 ? ts2
->val_type
== TEMP_VAL_REG
&&
4783 ts2
->reg
== reg
+ 1 &&
4785 : s
->reg_to_temp
[reg
+ 1] == NULL
)) {
4789 /* Without aliasing, the pair must also be an input. */
4790 tcg_debug_assert(ts2
);
4791 if (ts
->val_type
== TEMP_VAL_REG
&&
4792 ts2
->val_type
== TEMP_VAL_REG
&&
4793 ts2
->reg
== reg
+ 1 &&
4794 tcg_regset_test_reg(i_required_regs
, reg
)) {
4798 reg
= tcg_reg_alloc_pair(s
, i_required_regs
, i_allocated_regs
,
4799 0, ts
->indirect_base
);
4802 case 2: /* pair second */
4803 reg
= new_args
[arg_ct
->pair_index
] + 1;
4806 case 3: /* ialias with second output, no first input */
4807 tcg_debug_assert(arg_ct
->ialias
);
4808 i_preferred_regs
= output_pref(op
, arg_ct
->alias_index
);
4810 if (IS_DEAD_ARG(i
) &&
4811 !temp_readonly(ts
) &&
4812 ts
->val_type
== TEMP_VAL_REG
&&
4814 s
->reg_to_temp
[reg
- 1] == NULL
&&
4815 tcg_regset_test_reg(i_required_regs
, reg
) &&
4816 !tcg_regset_test_reg(i_allocated_regs
, reg
) &&
4817 !tcg_regset_test_reg(i_allocated_regs
, reg
- 1)) {
4818 tcg_regset_set_reg(i_allocated_regs
, reg
- 1);
4821 reg
= tcg_reg_alloc_pair(s
, i_required_regs
>> 1,
4822 i_allocated_regs
, 0,
4824 tcg_regset_set_reg(i_allocated_regs
, reg
);
4830 * If an aliased input is not dead after the instruction,
4831 * we must allocate a new register and move it.
4833 if (arg_ct
->ialias
&& (!IS_DEAD_ARG(i
) || temp_readonly(ts
))) {
4834 TCGRegSet t_allocated_regs
= i_allocated_regs
;
4837 * Because of the alias, and the continued life, make sure
4838 * that the temp is somewhere *other* than the reg pair,
4839 * and we get a copy in reg.
4841 tcg_regset_set_reg(t_allocated_regs
, reg
);
4842 tcg_regset_set_reg(t_allocated_regs
, reg
+ 1);
4843 if (ts
->val_type
== TEMP_VAL_REG
&& ts
->reg
== reg
) {
4844 /* If ts was already in reg, copy it somewhere else. */
4848 tcg_debug_assert(ts
->kind
!= TEMP_FIXED
);
4849 nr
= tcg_reg_alloc(s
, tcg_target_available_regs
[ts
->type
],
4850 t_allocated_regs
, 0, ts
->indirect_base
);
4851 ok
= tcg_out_mov(s
, ts
->type
, nr
, reg
);
4852 tcg_debug_assert(ok
);
4854 set_temp_val_reg(s
, ts
, nr
);
4856 temp_load(s
, ts
, tcg_target_available_regs
[ts
->type
],
4857 t_allocated_regs
, 0);
4858 copyto_new_reg
= true;
4861 /* Preferably allocate to reg, otherwise copy. */
4862 i_required_regs
= (TCGRegSet
)1 << reg
;
4863 temp_load(s
, ts
, i_required_regs
, i_allocated_regs
,
4865 copyto_new_reg
= ts
->reg
!= reg
;
4870 g_assert_not_reached();
4873 if (copyto_new_reg
) {
4874 if (!tcg_out_mov(s
, ts
->type
, reg
, ts
->reg
)) {
4876 * Cross register class move not supported. Sync the
4877 * temp back to its slot and load from there.
4879 temp_sync(s
, ts
, i_allocated_regs
, 0, 0);
4880 tcg_out_ld(s
, ts
->type
, reg
,
4881 ts
->mem_base
->reg
, ts
->mem_offset
);
4886 tcg_regset_set_reg(i_allocated_regs
, reg
);
4889 /* mark dead temporaries and free the associated registers */
4890 for (i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
4891 if (IS_DEAD_ARG(i
)) {
4892 temp_dead(s
, arg_temp(op
->args
[i
]));
4896 if (def
->flags
& TCG_OPF_COND_BRANCH
) {
4897 tcg_reg_alloc_cbranch(s
, i_allocated_regs
);
4898 } else if (def
->flags
& TCG_OPF_BB_END
) {
4899 tcg_reg_alloc_bb_end(s
, i_allocated_regs
);
4901 if (def
->flags
& TCG_OPF_CALL_CLOBBER
) {
4902 /* XXX: permit generic clobber register list ? */
4903 for (i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
4904 if (tcg_regset_test_reg(tcg_target_call_clobber_regs
, i
)) {
4905 tcg_reg_free(s
, i
, i_allocated_regs
);
4909 if (def
->flags
& TCG_OPF_SIDE_EFFECTS
) {
4910 /* sync globals if the op has side effects and might trigger
4912 sync_globals(s
, i_allocated_regs
);
4915 /* satisfy the output constraints */
4916 for(k
= 0; k
< nb_oargs
; k
++) {
4917 i
= def
->args_ct
[k
].sort_index
;
4919 arg_ct
= &def
->args_ct
[i
];
4922 /* ENV should not be modified. */
4923 tcg_debug_assert(!temp_readonly(ts
));
4925 switch (arg_ct
->pair
) {
4926 case 0: /* not paired */
4927 if (arg_ct
->oalias
&& !const_args
[arg_ct
->alias_index
]) {
4928 reg
= new_args
[arg_ct
->alias_index
];
4929 } else if (arg_ct
->newreg
) {
4930 reg
= tcg_reg_alloc(s
, arg_ct
->regs
,
4931 i_allocated_regs
| o_allocated_regs
,
4932 output_pref(op
, k
), ts
->indirect_base
);
4934 reg
= tcg_reg_alloc(s
, arg_ct
->regs
, o_allocated_regs
,
4935 output_pref(op
, k
), ts
->indirect_base
);
4939 case 1: /* first of pair */
4940 tcg_debug_assert(!arg_ct
->newreg
);
4941 if (arg_ct
->oalias
) {
4942 reg
= new_args
[arg_ct
->alias_index
];
4945 reg
= tcg_reg_alloc_pair(s
, arg_ct
->regs
, o_allocated_regs
,
4946 output_pref(op
, k
), ts
->indirect_base
);
4949 case 2: /* second of pair */
4950 tcg_debug_assert(!arg_ct
->newreg
);
4951 if (arg_ct
->oalias
) {
4952 reg
= new_args
[arg_ct
->alias_index
];
4954 reg
= new_args
[arg_ct
->pair_index
] + 1;
4958 case 3: /* first of pair, aliasing with a second input */
4959 tcg_debug_assert(!arg_ct
->newreg
);
4960 reg
= new_args
[arg_ct
->pair_index
] - 1;
4964 g_assert_not_reached();
4966 tcg_regset_set_reg(o_allocated_regs
, reg
);
4967 set_temp_val_reg(s
, ts
, reg
);
4968 ts
->mem_coherent
= 0;
4973 /* emit instruction */
4975 case INDEX_op_ext8s_i32
:
4976 tcg_out_ext8s(s
, TCG_TYPE_I32
, new_args
[0], new_args
[1]);
4978 case INDEX_op_ext8s_i64
:
4979 tcg_out_ext8s(s
, TCG_TYPE_I64
, new_args
[0], new_args
[1]);
4981 case INDEX_op_ext8u_i32
:
4982 case INDEX_op_ext8u_i64
:
4983 tcg_out_ext8u(s
, new_args
[0], new_args
[1]);
4985 case INDEX_op_ext16s_i32
:
4986 tcg_out_ext16s(s
, TCG_TYPE_I32
, new_args
[0], new_args
[1]);
4988 case INDEX_op_ext16s_i64
:
4989 tcg_out_ext16s(s
, TCG_TYPE_I64
, new_args
[0], new_args
[1]);
4991 case INDEX_op_ext16u_i32
:
4992 case INDEX_op_ext16u_i64
:
4993 tcg_out_ext16u(s
, new_args
[0], new_args
[1]);
4995 case INDEX_op_ext32s_i64
:
4996 tcg_out_ext32s(s
, new_args
[0], new_args
[1]);
4998 case INDEX_op_ext32u_i64
:
4999 tcg_out_ext32u(s
, new_args
[0], new_args
[1]);
5001 case INDEX_op_ext_i32_i64
:
5002 tcg_out_exts_i32_i64(s
, new_args
[0], new_args
[1]);
5004 case INDEX_op_extu_i32_i64
:
5005 tcg_out_extu_i32_i64(s
, new_args
[0], new_args
[1]);
5007 case INDEX_op_extrl_i64_i32
:
5008 tcg_out_extrl_i64_i32(s
, new_args
[0], new_args
[1]);
5011 if (def
->flags
& TCG_OPF_VECTOR
) {
5012 tcg_out_vec_op(s
, op
->opc
, TCGOP_VECL(op
), TCGOP_VECE(op
),
5013 new_args
, const_args
);
5015 tcg_out_op(s
, op
->opc
, new_args
, const_args
);
5020 /* move the outputs in the correct register if needed */
5021 for(i
= 0; i
< nb_oargs
; i
++) {
5022 ts
= arg_temp(op
->args
[i
]);
5024 /* ENV should not be modified. */
5025 tcg_debug_assert(!temp_readonly(ts
));
5027 if (NEED_SYNC_ARG(i
)) {
5028 temp_sync(s
, ts
, o_allocated_regs
, 0, IS_DEAD_ARG(i
));
5029 } else if (IS_DEAD_ARG(i
)) {
5035 static bool tcg_reg_alloc_dup2(TCGContext
*s
, const TCGOp
*op
)
5037 const TCGLifeData arg_life
= op
->life
;
5038 TCGTemp
*ots
, *itsl
, *itsh
;
5039 TCGType vtype
= TCGOP_VECL(op
) + TCG_TYPE_V64
;
5041 /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
5042 tcg_debug_assert(TCG_TARGET_REG_BITS
== 32);
5043 tcg_debug_assert(TCGOP_VECE(op
) == MO_64
);
5045 ots
= arg_temp(op
->args
[0]);
5046 itsl
= arg_temp(op
->args
[1]);
5047 itsh
= arg_temp(op
->args
[2]);
5049 /* ENV should not be modified. */
5050 tcg_debug_assert(!temp_readonly(ots
));
5052 /* Allocate the output register now. */
5053 if (ots
->val_type
!= TEMP_VAL_REG
) {
5054 TCGRegSet allocated_regs
= s
->reserved_regs
;
5055 TCGRegSet dup_out_regs
=
5056 tcg_op_defs
[INDEX_op_dup_vec
].args_ct
[0].regs
;
5059 /* Make sure to not spill the input registers. */
5060 if (!IS_DEAD_ARG(1) && itsl
->val_type
== TEMP_VAL_REG
) {
5061 tcg_regset_set_reg(allocated_regs
, itsl
->reg
);
5063 if (!IS_DEAD_ARG(2) && itsh
->val_type
== TEMP_VAL_REG
) {
5064 tcg_regset_set_reg(allocated_regs
, itsh
->reg
);
5067 oreg
= tcg_reg_alloc(s
, dup_out_regs
, allocated_regs
,
5068 output_pref(op
, 0), ots
->indirect_base
);
5069 set_temp_val_reg(s
, ots
, oreg
);
5072 /* Promote dup2 of immediates to dupi_vec. */
5073 if (itsl
->val_type
== TEMP_VAL_CONST
&& itsh
->val_type
== TEMP_VAL_CONST
) {
5074 uint64_t val
= deposit64(itsl
->val
, 32, 32, itsh
->val
);
5077 if (val
== dup_const(MO_8
, val
)) {
5079 } else if (val
== dup_const(MO_16
, val
)) {
5081 } else if (val
== dup_const(MO_32
, val
)) {
5085 tcg_out_dupi_vec(s
, vtype
, vece
, ots
->reg
, val
);
5089 /* If the two inputs form one 64-bit value, try dupm_vec. */
5090 if (itsl
->temp_subindex
== HOST_BIG_ENDIAN
&&
5091 itsh
->temp_subindex
== !HOST_BIG_ENDIAN
&&
5092 itsl
== itsh
+ (HOST_BIG_ENDIAN
? 1 : -1)) {
5093 TCGTemp
*its
= itsl
- HOST_BIG_ENDIAN
;
5095 temp_sync(s
, its
+ 0, s
->reserved_regs
, 0, 0);
5096 temp_sync(s
, its
+ 1, s
->reserved_regs
, 0, 0);
5098 if (tcg_out_dupm_vec(s
, vtype
, MO_64
, ots
->reg
,
5099 its
->mem_base
->reg
, its
->mem_offset
)) {
5104 /* Fall back to generic expansion. */
5108 ots
->mem_coherent
= 0;
5109 if (IS_DEAD_ARG(1)) {
5112 if (IS_DEAD_ARG(2)) {
5115 if (NEED_SYNC_ARG(0)) {
5116 temp_sync(s
, ots
, s
->reserved_regs
, 0, IS_DEAD_ARG(0));
5117 } else if (IS_DEAD_ARG(0)) {
5123 static void load_arg_reg(TCGContext
*s
, TCGReg reg
, TCGTemp
*ts
,
5124 TCGRegSet allocated_regs
)
5126 if (ts
->val_type
== TEMP_VAL_REG
) {
5127 if (ts
->reg
!= reg
) {
5128 tcg_reg_free(s
, reg
, allocated_regs
);
5129 if (!tcg_out_mov(s
, ts
->type
, reg
, ts
->reg
)) {
5131 * Cross register class move not supported. Sync the
5132 * temp back to its slot and load from there.
5134 temp_sync(s
, ts
, allocated_regs
, 0, 0);
5135 tcg_out_ld(s
, ts
->type
, reg
,
5136 ts
->mem_base
->reg
, ts
->mem_offset
);
5140 TCGRegSet arg_set
= 0;
5142 tcg_reg_free(s
, reg
, allocated_regs
);
5143 tcg_regset_set_reg(arg_set
, reg
);
5144 temp_load(s
, ts
, arg_set
, allocated_regs
, 0);
5148 static void load_arg_stk(TCGContext
*s
, unsigned arg_slot
, TCGTemp
*ts
,
5149 TCGRegSet allocated_regs
)
5152 * When the destination is on the stack, load up the temp and store.
5153 * If there are many call-saved registers, the temp might live to
5154 * see another use; otherwise it'll be discarded.
5156 temp_load(s
, ts
, tcg_target_available_regs
[ts
->type
], allocated_regs
, 0);
5157 tcg_out_st(s
, ts
->type
, ts
->reg
, TCG_REG_CALL_STACK
,
5158 arg_slot_stk_ofs(arg_slot
));
5161 static void load_arg_normal(TCGContext
*s
, const TCGCallArgumentLoc
*l
,
5162 TCGTemp
*ts
, TCGRegSet
*allocated_regs
)
5164 if (arg_slot_reg_p(l
->arg_slot
)) {
5165 TCGReg reg
= tcg_target_call_iarg_regs
[l
->arg_slot
];
5166 load_arg_reg(s
, reg
, ts
, *allocated_regs
);
5167 tcg_regset_set_reg(*allocated_regs
, reg
);
5169 load_arg_stk(s
, l
->arg_slot
, ts
, *allocated_regs
);
5173 static void load_arg_ref(TCGContext
*s
, unsigned arg_slot
, TCGReg ref_base
,
5174 intptr_t ref_off
, TCGRegSet
*allocated_regs
)
5178 if (arg_slot_reg_p(arg_slot
)) {
5179 reg
= tcg_target_call_iarg_regs
[arg_slot
];
5180 tcg_reg_free(s
, reg
, *allocated_regs
);
5181 tcg_out_addi_ptr(s
, reg
, ref_base
, ref_off
);
5182 tcg_regset_set_reg(*allocated_regs
, reg
);
5184 reg
= tcg_reg_alloc(s
, tcg_target_available_regs
[TCG_TYPE_PTR
],
5185 *allocated_regs
, 0, false);
5186 tcg_out_addi_ptr(s
, reg
, ref_base
, ref_off
);
5187 tcg_out_st(s
, TCG_TYPE_PTR
, reg
, TCG_REG_CALL_STACK
,
5188 arg_slot_stk_ofs(arg_slot
));
5192 static void tcg_reg_alloc_call(TCGContext
*s
, TCGOp
*op
)
5194 const int nb_oargs
= TCGOP_CALLO(op
);
5195 const int nb_iargs
= TCGOP_CALLI(op
);
5196 const TCGLifeData arg_life
= op
->life
;
5197 const TCGHelperInfo
*info
= tcg_call_info(op
);
5198 TCGRegSet allocated_regs
= s
->reserved_regs
;
5202 * Move inputs into place in reverse order,
5203 * so that we place stacked arguments first.
5205 for (i
= nb_iargs
- 1; i
>= 0; --i
) {
5206 const TCGCallArgumentLoc
*loc
= &info
->in
[i
];
5207 TCGTemp
*ts
= arg_temp(op
->args
[nb_oargs
+ i
]);
5209 switch (loc
->kind
) {
5210 case TCG_CALL_ARG_NORMAL
:
5211 case TCG_CALL_ARG_EXTEND_U
:
5212 case TCG_CALL_ARG_EXTEND_S
:
5213 load_arg_normal(s
, loc
, ts
, &allocated_regs
);
5215 case TCG_CALL_ARG_BY_REF
:
5216 load_arg_stk(s
, loc
->ref_slot
, ts
, allocated_regs
);
5217 load_arg_ref(s
, loc
->arg_slot
, TCG_REG_CALL_STACK
,
5218 arg_slot_stk_ofs(loc
->ref_slot
),
5221 case TCG_CALL_ARG_BY_REF_N
:
5222 load_arg_stk(s
, loc
->ref_slot
, ts
, allocated_regs
);
5225 g_assert_not_reached();
5229 /* Mark dead temporaries and free the associated registers. */
5230 for (i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
5231 if (IS_DEAD_ARG(i
)) {
5232 temp_dead(s
, arg_temp(op
->args
[i
]));
5236 /* Clobber call registers. */
5237 for (i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
5238 if (tcg_regset_test_reg(tcg_target_call_clobber_regs
, i
)) {
5239 tcg_reg_free(s
, i
, allocated_regs
);
5244 * Save globals if they might be written by the helper,
5245 * sync them if they might be read.
5247 if (info
->flags
& TCG_CALL_NO_READ_GLOBALS
) {
5249 } else if (info
->flags
& TCG_CALL_NO_WRITE_GLOBALS
) {
5250 sync_globals(s
, allocated_regs
);
5252 save_globals(s
, allocated_regs
);
5256 * If the ABI passes a pointer to the returned struct as the first
5257 * argument, load that now. Pass a pointer to the output home slot.
5259 if (info
->out_kind
== TCG_CALL_RET_BY_REF
) {
5260 TCGTemp
*ts
= arg_temp(op
->args
[0]);
5262 if (!ts
->mem_allocated
) {
5263 temp_allocate_frame(s
, ts
);
5265 load_arg_ref(s
, 0, ts
->mem_base
->reg
, ts
->mem_offset
, &allocated_regs
);
5268 tcg_out_call(s
, tcg_call_func(op
), info
);
5270 /* Assign output registers and emit moves if needed. */
5271 switch (info
->out_kind
) {
5272 case TCG_CALL_RET_NORMAL
:
5273 for (i
= 0; i
< nb_oargs
; i
++) {
5274 TCGTemp
*ts
= arg_temp(op
->args
[i
]);
5275 TCGReg reg
= tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL
, i
);
5277 /* ENV should not be modified. */
5278 tcg_debug_assert(!temp_readonly(ts
));
5280 set_temp_val_reg(s
, ts
, reg
);
5281 ts
->mem_coherent
= 0;
5285 case TCG_CALL_RET_BY_VEC
:
5287 TCGTemp
*ts
= arg_temp(op
->args
[0]);
5289 tcg_debug_assert(ts
->base_type
== TCG_TYPE_I128
);
5290 tcg_debug_assert(ts
->temp_subindex
== 0);
5291 if (!ts
->mem_allocated
) {
5292 temp_allocate_frame(s
, ts
);
5294 tcg_out_st(s
, TCG_TYPE_V128
,
5295 tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC
, 0),
5296 ts
->mem_base
->reg
, ts
->mem_offset
);
5298 /* fall through to mark all parts in memory */
5300 case TCG_CALL_RET_BY_REF
:
5301 /* The callee has performed a write through the reference. */
5302 for (i
= 0; i
< nb_oargs
; i
++) {
5303 TCGTemp
*ts
= arg_temp(op
->args
[i
]);
5304 ts
->val_type
= TEMP_VAL_MEM
;
5309 g_assert_not_reached();
5312 /* Flush or discard output registers as needed. */
5313 for (i
= 0; i
< nb_oargs
; i
++) {
5314 TCGTemp
*ts
= arg_temp(op
->args
[i
]);
5315 if (NEED_SYNC_ARG(i
)) {
5316 temp_sync(s
, ts
, s
->reserved_regs
, 0, IS_DEAD_ARG(i
));
5317 } else if (IS_DEAD_ARG(i
)) {
5324 * atom_and_align_for_opc:
5326 * @opc: memory operation code
5327 * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations
5328 * @allow_two_ops: true if we are prepared to issue two operations
5330 * Return the alignment and atomicity to use for the inline fast path
5331 * for the given memory operation. The alignment may be larger than
5332 * that specified in @opc, and the correct alignment will be diagnosed
5333 * by the slow path helper.
5335 * If @allow_two_ops, the host is prepared to test for 2x alignment,
5336 * and issue two loads or stores for subalignment.
5338 static TCGAtomAlign
atom_and_align_for_opc(TCGContext
*s
, MemOp opc
,
5339 MemOp host_atom
, bool allow_two_ops
)
5341 MemOp align
= get_alignment_bits(opc
);
5342 MemOp size
= opc
& MO_SIZE
;
5343 MemOp half
= size
? size
- 1 : 0;
5347 /* When serialized, no further atomicity required. */
5348 if (s
->gen_tb
->cflags
& CF_PARALLEL
) {
5349 atom
= opc
& MO_ATOM_MASK
;
5351 atom
= MO_ATOM_NONE
;
5356 /* The operation requires no specific atomicity. */
5360 case MO_ATOM_IFALIGN
:
5364 case MO_ATOM_IFALIGN_PAIR
:
5368 case MO_ATOM_WITHIN16
:
5370 if (size
== MO_128
) {
5371 /* Misalignment implies !within16, and therefore no atomicity. */
5372 } else if (host_atom
!= MO_ATOM_WITHIN16
) {
5373 /* The host does not implement within16, so require alignment. */
5374 align
= MAX(align
, size
);
5378 case MO_ATOM_WITHIN16_PAIR
:
5381 * Misalignment implies !within16, and therefore half atomicity.
5382 * Any host prepared for two operations can implement this with
5385 if (host_atom
!= MO_ATOM_WITHIN16
&& allow_two_ops
) {
5386 align
= MAX(align
, half
);
5390 case MO_ATOM_SUBALIGN
:
5392 if (host_atom
!= MO_ATOM_SUBALIGN
) {
5393 /* If unaligned but not odd, there are subobjects up to half. */
5394 if (allow_two_ops
) {
5395 align
= MAX(align
, half
);
5397 align
= MAX(align
, size
);
5403 g_assert_not_reached();
5406 return (TCGAtomAlign
){ .atom
= atmax
, .align
= align
};
5410 * Similarly for qemu_ld/st slow path helpers.
5411 * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously,
5412 * using only the provided backend tcg_out_* functions.
5415 static int tcg_out_helper_stk_ofs(TCGType type
, unsigned slot
)
5417 int ofs
= arg_slot_stk_ofs(slot
);
5420 * Each stack slot is TCG_TARGET_LONG_BITS. If the host does not
5421 * require extension to uint64_t, adjust the address for uint32_t.
5423 if (HOST_BIG_ENDIAN
&&
5424 TCG_TARGET_REG_BITS
== 64 &&
5425 type
== TCG_TYPE_I32
) {
5431 static void tcg_out_helper_load_slots(TCGContext
*s
,
5432 unsigned nmov
, TCGMovExtend
*mov
,
5433 const TCGLdstHelperParam
*parm
)
5439 * Start from the end, storing to the stack first.
5440 * This frees those registers, so we need not consider overlap.
5442 for (i
= nmov
; i
-- > 0; ) {
5443 unsigned slot
= mov
[i
].dst
;
5445 if (arg_slot_reg_p(slot
)) {
5449 TCGReg src
= mov
[i
].src
;
5450 TCGType dst_type
= mov
[i
].dst_type
;
5451 MemOp dst_mo
= dst_type
== TCG_TYPE_I32
? MO_32
: MO_64
;
5453 /* The argument is going onto the stack; extend into scratch. */
5454 if ((mov
[i
].src_ext
& MO_SIZE
) != dst_mo
) {
5455 tcg_debug_assert(parm
->ntmp
!= 0);
5456 mov
[i
].dst
= src
= parm
->tmp
[0];
5457 tcg_out_movext1(s
, &mov
[i
]);
5460 tcg_out_st(s
, dst_type
, src
, TCG_REG_CALL_STACK
,
5461 tcg_out_helper_stk_ofs(dst_type
, slot
));
5467 * The remaining arguments are in registers.
5468 * Convert slot numbers to argument registers.
5471 for (i
= 0; i
< nmov
; ++i
) {
5472 mov
[i
].dst
= tcg_target_call_iarg_regs
[mov
[i
].dst
];
5477 /* The backend must have provided enough temps for the worst case. */
5478 tcg_debug_assert(parm
->ntmp
>= 2);
5481 for (unsigned j
= 0; j
< 3; ++j
) {
5482 if (dst3
== mov
[j
].src
) {
5484 * Conflict. Copy the source to a temporary, perform the
5485 * remaining moves, then the extension from our scratch
5488 TCGReg scratch
= parm
->tmp
[1];
5490 tcg_out_mov(s
, mov
[3].src_type
, scratch
, mov
[3].src
);
5491 tcg_out_movext3(s
, mov
, mov
+ 1, mov
+ 2, parm
->tmp
[0]);
5492 tcg_out_movext1_new_src(s
, &mov
[3], scratch
);
5497 /* No conflicts: perform this move and continue. */
5498 tcg_out_movext1(s
, &mov
[3]);
5502 tcg_out_movext3(s
, mov
, mov
+ 1, mov
+ 2,
5503 parm
->ntmp
? parm
->tmp
[0] : -1);
5506 tcg_out_movext2(s
, mov
, mov
+ 1,
5507 parm
->ntmp
? parm
->tmp
[0] : -1);
5510 tcg_out_movext1(s
, mov
);
5513 g_assert_not_reached();
5517 static void tcg_out_helper_load_imm(TCGContext
*s
, unsigned slot
,
5518 TCGType type
, tcg_target_long imm
,
5519 const TCGLdstHelperParam
*parm
)
5521 if (arg_slot_reg_p(slot
)) {
5522 tcg_out_movi(s
, type
, tcg_target_call_iarg_regs
[slot
], imm
);
5524 int ofs
= tcg_out_helper_stk_ofs(type
, slot
);
5525 if (!tcg_out_sti(s
, type
, imm
, TCG_REG_CALL_STACK
, ofs
)) {
5526 tcg_debug_assert(parm
->ntmp
!= 0);
5527 tcg_out_movi(s
, type
, parm
->tmp
[0], imm
);
5528 tcg_out_st(s
, type
, parm
->tmp
[0], TCG_REG_CALL_STACK
, ofs
);
5533 static void tcg_out_helper_load_common_args(TCGContext
*s
,
5534 const TCGLabelQemuLdst
*ldst
,
5535 const TCGLdstHelperParam
*parm
,
5536 const TCGHelperInfo
*info
,
5539 TCGMovExtend ptr_mov
= {
5540 .dst_type
= TCG_TYPE_PTR
,
5541 .src_type
= TCG_TYPE_PTR
,
5542 .src_ext
= sizeof(void *) == 4 ? MO_32
: MO_64
5544 const TCGCallArgumentLoc
*loc
= &info
->in
[0];
5547 tcg_target_ulong imm
;
5550 * Handle env, which is always first.
5552 ptr_mov
.dst
= loc
->arg_slot
;
5553 ptr_mov
.src
= TCG_AREG0
;
5554 tcg_out_helper_load_slots(s
, 1, &ptr_mov
, parm
);
5560 loc
= &info
->in
[next_arg
];
5561 type
= TCG_TYPE_I32
;
5562 switch (loc
->kind
) {
5563 case TCG_CALL_ARG_NORMAL
:
5565 case TCG_CALL_ARG_EXTEND_U
:
5566 case TCG_CALL_ARG_EXTEND_S
:
5567 /* No extension required for MemOpIdx. */
5568 tcg_debug_assert(imm
<= INT32_MAX
);
5569 type
= TCG_TYPE_REG
;
5572 g_assert_not_reached();
5574 tcg_out_helper_load_imm(s
, loc
->arg_slot
, type
, imm
, parm
);
5580 loc
= &info
->in
[next_arg
];
5581 slot
= loc
->arg_slot
;
5586 if (arg_slot_reg_p(slot
)) {
5587 arg_reg
= tcg_target_call_iarg_regs
[slot
];
5589 ra_reg
= parm
->ra_gen(s
, ldst
, arg_reg
);
5592 ptr_mov
.src
= ra_reg
;
5593 tcg_out_helper_load_slots(s
, 1, &ptr_mov
, parm
);
5595 imm
= (uintptr_t)ldst
->raddr
;
5596 tcg_out_helper_load_imm(s
, slot
, TCG_TYPE_PTR
, imm
, parm
);
5600 static unsigned tcg_out_helper_add_mov(TCGMovExtend
*mov
,
5601 const TCGCallArgumentLoc
*loc
,
5602 TCGType dst_type
, TCGType src_type
,
5603 TCGReg lo
, TCGReg hi
)
5607 if (dst_type
<= TCG_TYPE_REG
) {
5610 switch (loc
->kind
) {
5611 case TCG_CALL_ARG_NORMAL
:
5612 src_ext
= src_type
== TCG_TYPE_I32
? MO_32
: MO_64
;
5614 case TCG_CALL_ARG_EXTEND_U
:
5615 dst_type
= TCG_TYPE_REG
;
5618 case TCG_CALL_ARG_EXTEND_S
:
5619 dst_type
= TCG_TYPE_REG
;
5623 g_assert_not_reached();
5626 mov
[0].dst
= loc
->arg_slot
;
5627 mov
[0].dst_type
= dst_type
;
5629 mov
[0].src_type
= src_type
;
5630 mov
[0].src_ext
= src_ext
;
5634 if (TCG_TARGET_REG_BITS
== 32) {
5635 assert(dst_type
== TCG_TYPE_I64
);
5638 assert(dst_type
== TCG_TYPE_I128
);
5642 mov
[0].dst
= loc
[HOST_BIG_ENDIAN
].arg_slot
;
5644 mov
[0].dst_type
= TCG_TYPE_REG
;
5645 mov
[0].src_type
= TCG_TYPE_REG
;
5646 mov
[0].src_ext
= reg_mo
;
5648 mov
[1].dst
= loc
[!HOST_BIG_ENDIAN
].arg_slot
;
5650 mov
[1].dst_type
= TCG_TYPE_REG
;
5651 mov
[1].src_type
= TCG_TYPE_REG
;
5652 mov
[1].src_ext
= reg_mo
;
5657 static void tcg_out_ld_helper_args(TCGContext
*s
, const TCGLabelQemuLdst
*ldst
,
5658 const TCGLdstHelperParam
*parm
)
5660 const TCGHelperInfo
*info
;
5661 const TCGCallArgumentLoc
*loc
;
5662 TCGMovExtend mov
[2];
5663 unsigned next_arg
, nmov
;
5664 MemOp mop
= get_memop(ldst
->oi
);
5666 switch (mop
& MO_SIZE
) {
5670 info
= &info_helper_ld32_mmu
;
5673 info
= &info_helper_ld64_mmu
;
5676 info
= &info_helper_ld128_mmu
;
5679 g_assert_not_reached();
5682 /* Defer env argument. */
5685 loc
= &info
->in
[next_arg
];
5686 if (TCG_TARGET_REG_BITS
== 32 && s
->addr_type
== TCG_TYPE_I32
) {
5688 * 32-bit host with 32-bit guest: zero-extend the guest address
5689 * to 64-bits for the helper by storing the low part, then
5690 * load a zero for the high part.
5692 tcg_out_helper_add_mov(mov
, loc
+ HOST_BIG_ENDIAN
,
5693 TCG_TYPE_I32
, TCG_TYPE_I32
,
5694 ldst
->addrlo_reg
, -1);
5695 tcg_out_helper_load_slots(s
, 1, mov
, parm
);
5697 tcg_out_helper_load_imm(s
, loc
[!HOST_BIG_ENDIAN
].arg_slot
,
5698 TCG_TYPE_I32
, 0, parm
);
5701 nmov
= tcg_out_helper_add_mov(mov
, loc
, TCG_TYPE_I64
, s
->addr_type
,
5702 ldst
->addrlo_reg
, ldst
->addrhi_reg
);
5703 tcg_out_helper_load_slots(s
, nmov
, mov
, parm
);
5707 switch (info
->out_kind
) {
5708 case TCG_CALL_RET_NORMAL
:
5709 case TCG_CALL_RET_BY_VEC
:
5711 case TCG_CALL_RET_BY_REF
:
5713 * The return reference is in the first argument slot.
5714 * We need memory in which to return: re-use the top of stack.
5717 int ofs_slot0
= TCG_TARGET_CALL_STACK_OFFSET
;
5719 if (arg_slot_reg_p(0)) {
5720 tcg_out_addi_ptr(s
, tcg_target_call_iarg_regs
[0],
5721 TCG_REG_CALL_STACK
, ofs_slot0
);
5723 tcg_debug_assert(parm
->ntmp
!= 0);
5724 tcg_out_addi_ptr(s
, parm
->tmp
[0],
5725 TCG_REG_CALL_STACK
, ofs_slot0
);
5726 tcg_out_st(s
, TCG_TYPE_PTR
, parm
->tmp
[0],
5727 TCG_REG_CALL_STACK
, ofs_slot0
);
5732 g_assert_not_reached();
5735 tcg_out_helper_load_common_args(s
, ldst
, parm
, info
, next_arg
);
5738 static void tcg_out_ld_helper_ret(TCGContext
*s
, const TCGLabelQemuLdst
*ldst
,
5740 const TCGLdstHelperParam
*parm
)
5742 MemOp mop
= get_memop(ldst
->oi
);
5743 TCGMovExtend mov
[2];
5746 switch (ldst
->type
) {
5748 if (TCG_TARGET_REG_BITS
== 32) {
5754 mov
[0].dst
= ldst
->datalo_reg
;
5755 mov
[0].src
= tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL
, 0);
5756 mov
[0].dst_type
= ldst
->type
;
5757 mov
[0].src_type
= TCG_TYPE_REG
;
5760 * If load_sign, then we allowed the helper to perform the
5761 * appropriate sign extension to tcg_target_ulong, and all
5762 * we need now is a plain move.
5764 * If they do not, then we expect the relevant extension
5765 * instruction to be no more expensive than a move, and
5766 * we thus save the icache etc by only using one of two
5769 if (load_sign
|| !(mop
& MO_SIGN
)) {
5770 if (TCG_TARGET_REG_BITS
== 32 || ldst
->type
== TCG_TYPE_I32
) {
5771 mov
[0].src_ext
= MO_32
;
5773 mov
[0].src_ext
= MO_64
;
5776 mov
[0].src_ext
= mop
& MO_SSIZE
;
5778 tcg_out_movext1(s
, mov
);
5782 tcg_debug_assert(TCG_TARGET_REG_BITS
== 64);
5783 ofs_slot0
= TCG_TARGET_CALL_STACK_OFFSET
;
5784 switch (TCG_TARGET_CALL_RET_I128
) {
5785 case TCG_CALL_RET_NORMAL
:
5787 case TCG_CALL_RET_BY_VEC
:
5788 tcg_out_st(s
, TCG_TYPE_V128
,
5789 tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC
, 0),
5790 TCG_REG_CALL_STACK
, ofs_slot0
);
5792 case TCG_CALL_RET_BY_REF
:
5793 tcg_out_ld(s
, TCG_TYPE_I64
, ldst
->datalo_reg
,
5794 TCG_REG_CALL_STACK
, ofs_slot0
+ 8 * HOST_BIG_ENDIAN
);
5795 tcg_out_ld(s
, TCG_TYPE_I64
, ldst
->datahi_reg
,
5796 TCG_REG_CALL_STACK
, ofs_slot0
+ 8 * !HOST_BIG_ENDIAN
);
5799 g_assert_not_reached();
5804 g_assert_not_reached();
5807 mov
[0].dst
= ldst
->datalo_reg
;
5809 tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL
, HOST_BIG_ENDIAN
);
5810 mov
[0].dst_type
= TCG_TYPE_REG
;
5811 mov
[0].src_type
= TCG_TYPE_REG
;
5812 mov
[0].src_ext
= TCG_TARGET_REG_BITS
== 32 ? MO_32
: MO_64
;
5814 mov
[1].dst
= ldst
->datahi_reg
;
5816 tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL
, !HOST_BIG_ENDIAN
);
5817 mov
[1].dst_type
= TCG_TYPE_REG
;
5818 mov
[1].src_type
= TCG_TYPE_REG
;
5819 mov
[1].src_ext
= TCG_TARGET_REG_BITS
== 32 ? MO_32
: MO_64
;
5821 tcg_out_movext2(s
, mov
, mov
+ 1, parm
->ntmp
? parm
->tmp
[0] : -1);
5824 static void tcg_out_st_helper_args(TCGContext
*s
, const TCGLabelQemuLdst
*ldst
,
5825 const TCGLdstHelperParam
*parm
)
5827 const TCGHelperInfo
*info
;
5828 const TCGCallArgumentLoc
*loc
;
5829 TCGMovExtend mov
[4];
5831 unsigned next_arg
, nmov
, n
;
5832 MemOp mop
= get_memop(ldst
->oi
);
5834 switch (mop
& MO_SIZE
) {
5838 info
= &info_helper_st32_mmu
;
5839 data_type
= TCG_TYPE_I32
;
5842 info
= &info_helper_st64_mmu
;
5843 data_type
= TCG_TYPE_I64
;
5846 info
= &info_helper_st128_mmu
;
5847 data_type
= TCG_TYPE_I128
;
5850 g_assert_not_reached();
5853 /* Defer env argument. */
5857 /* Handle addr argument. */
5858 loc
= &info
->in
[next_arg
];
5859 if (TCG_TARGET_REG_BITS
== 32 && s
->addr_type
== TCG_TYPE_I32
) {
5861 * 32-bit host with 32-bit guest: zero-extend the guest address
5862 * to 64-bits for the helper by storing the low part. Later,
5863 * after we have processed the register inputs, we will load a
5864 * zero for the high part.
5866 tcg_out_helper_add_mov(mov
, loc
+ HOST_BIG_ENDIAN
,
5867 TCG_TYPE_I32
, TCG_TYPE_I32
,
5868 ldst
->addrlo_reg
, -1);
5872 n
= tcg_out_helper_add_mov(mov
, loc
, TCG_TYPE_I64
, s
->addr_type
,
5873 ldst
->addrlo_reg
, ldst
->addrhi_reg
);
5878 /* Handle data argument. */
5879 loc
= &info
->in
[next_arg
];
5880 switch (loc
->kind
) {
5881 case TCG_CALL_ARG_NORMAL
:
5882 case TCG_CALL_ARG_EXTEND_U
:
5883 case TCG_CALL_ARG_EXTEND_S
:
5884 n
= tcg_out_helper_add_mov(mov
+ nmov
, loc
, data_type
, ldst
->type
,
5885 ldst
->datalo_reg
, ldst
->datahi_reg
);
5888 tcg_out_helper_load_slots(s
, nmov
, mov
, parm
);
5891 case TCG_CALL_ARG_BY_REF
:
5892 tcg_debug_assert(TCG_TARGET_REG_BITS
== 64);
5893 tcg_debug_assert(data_type
== TCG_TYPE_I128
);
5894 tcg_out_st(s
, TCG_TYPE_I64
,
5895 HOST_BIG_ENDIAN
? ldst
->datahi_reg
: ldst
->datalo_reg
,
5896 TCG_REG_CALL_STACK
, arg_slot_stk_ofs(loc
[0].ref_slot
));
5897 tcg_out_st(s
, TCG_TYPE_I64
,
5898 HOST_BIG_ENDIAN
? ldst
->datalo_reg
: ldst
->datahi_reg
,
5899 TCG_REG_CALL_STACK
, arg_slot_stk_ofs(loc
[1].ref_slot
));
5901 tcg_out_helper_load_slots(s
, nmov
, mov
, parm
);
5903 if (arg_slot_reg_p(loc
->arg_slot
)) {
5904 tcg_out_addi_ptr(s
, tcg_target_call_iarg_regs
[loc
->arg_slot
],
5906 arg_slot_stk_ofs(loc
->ref_slot
));
5908 tcg_debug_assert(parm
->ntmp
!= 0);
5909 tcg_out_addi_ptr(s
, parm
->tmp
[0], TCG_REG_CALL_STACK
,
5910 arg_slot_stk_ofs(loc
->ref_slot
));
5911 tcg_out_st(s
, TCG_TYPE_PTR
, parm
->tmp
[0],
5912 TCG_REG_CALL_STACK
, arg_slot_stk_ofs(loc
->arg_slot
));
5918 g_assert_not_reached();
5921 if (TCG_TARGET_REG_BITS
== 32 && s
->addr_type
== TCG_TYPE_I32
) {
5922 /* Zero extend the address by loading a zero for the high part. */
5923 loc
= &info
->in
[1 + !HOST_BIG_ENDIAN
];
5924 tcg_out_helper_load_imm(s
, loc
->arg_slot
, TCG_TYPE_I32
, 0, parm
);
5927 tcg_out_helper_load_common_args(s
, ldst
, parm
, info
, next_arg
);
5930 void tcg_dump_op_count(GString
*buf
)
5932 g_string_append_printf(buf
, "[TCG profiler not compiled]\n");
5935 int tcg_gen_code(TCGContext
*s
, TranslationBlock
*tb
, uint64_t pc_start
)
5937 int i
, start_words
, num_insns
;
5940 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
)
5941 && qemu_log_in_addr_range(pc_start
))) {
5942 FILE *logfile
= qemu_log_trylock();
5944 fprintf(logfile
, "OP:\n");
5945 tcg_dump_ops(s
, logfile
, false);
5946 fprintf(logfile
, "\n");
5947 qemu_log_unlock(logfile
);
5951 #ifdef CONFIG_DEBUG_TCG
5952 /* Ensure all labels referenced have been emitted. */
5957 QSIMPLEQ_FOREACH(l
, &s
->labels
, next
) {
5958 if (unlikely(!l
->present
) && !QSIMPLEQ_EMPTY(&l
->branches
)) {
5959 qemu_log_mask(CPU_LOG_TB_OP
,
5960 "$L%d referenced but not present.\n", l
->id
);
5970 reachable_code_pass(s
);
5974 if (s
->nb_indirects
> 0) {
5975 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND
)
5976 && qemu_log_in_addr_range(pc_start
))) {
5977 FILE *logfile
= qemu_log_trylock();
5979 fprintf(logfile
, "OP before indirect lowering:\n");
5980 tcg_dump_ops(s
, logfile
, false);
5981 fprintf(logfile
, "\n");
5982 qemu_log_unlock(logfile
);
5986 /* Replace indirect temps with direct temps. */
5987 if (liveness_pass_2(s
)) {
5988 /* If changes were made, re-run liveness. */
5993 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT
)
5994 && qemu_log_in_addr_range(pc_start
))) {
5995 FILE *logfile
= qemu_log_trylock();
5997 fprintf(logfile
, "OP after optimization and liveness analysis:\n");
5998 tcg_dump_ops(s
, logfile
, true);
5999 fprintf(logfile
, "\n");
6000 qemu_log_unlock(logfile
);
6004 /* Initialize goto_tb jump offsets. */
6005 tb
->jmp_reset_offset
[0] = TB_JMP_OFFSET_INVALID
;
6006 tb
->jmp_reset_offset
[1] = TB_JMP_OFFSET_INVALID
;
6007 tb
->jmp_insn_offset
[0] = TB_JMP_OFFSET_INVALID
;
6008 tb
->jmp_insn_offset
[1] = TB_JMP_OFFSET_INVALID
;
6010 tcg_reg_alloc_start(s
);
6013 * Reset the buffer pointers when restarting after overflow.
6014 * TODO: Move this into translate-all.c with the rest of the
6015 * buffer management. Having only this done here is confusing.
6017 s
->code_buf
= tcg_splitwx_to_rw(tb
->tc
.ptr
);
6018 s
->code_ptr
= s
->code_buf
;
6020 #ifdef TCG_TARGET_NEED_LDST_LABELS
6021 QSIMPLEQ_INIT(&s
->ldst_labels
);
6023 #ifdef TCG_TARGET_NEED_POOL_LABELS
6024 s
->pool_labels
= NULL
;
6027 start_words
= s
->insn_start_words
;
6029 tcg_malloc(sizeof(uint64_t) * s
->gen_tb
->icount
* start_words
);
6031 tcg_out_tb_start(s
);
6034 QTAILQ_FOREACH(op
, &s
->ops
, link
) {
6035 TCGOpcode opc
= op
->opc
;
6038 case INDEX_op_mov_i32
:
6039 case INDEX_op_mov_i64
:
6040 case INDEX_op_mov_vec
:
6041 tcg_reg_alloc_mov(s
, op
);
6043 case INDEX_op_dup_vec
:
6044 tcg_reg_alloc_dup(s
, op
);
6046 case INDEX_op_insn_start
:
6047 if (num_insns
>= 0) {
6048 size_t off
= tcg_current_code_size(s
);
6049 s
->gen_insn_end_off
[num_insns
] = off
;
6050 /* Assert that we do not overflow our stored offset. */
6051 assert(s
->gen_insn_end_off
[num_insns
] == off
);
6054 for (i
= 0; i
< start_words
; ++i
) {
6055 s
->gen_insn_data
[num_insns
* start_words
+ i
] =
6056 tcg_get_insn_start_param(op
, i
);
6059 case INDEX_op_discard
:
6060 temp_dead(s
, arg_temp(op
->args
[0]));
6062 case INDEX_op_set_label
:
6063 tcg_reg_alloc_bb_end(s
, s
->reserved_regs
);
6064 tcg_out_label(s
, arg_label(op
->args
[0]));
6067 tcg_reg_alloc_call(s
, op
);
6069 case INDEX_op_exit_tb
:
6070 tcg_out_exit_tb(s
, op
->args
[0]);
6072 case INDEX_op_goto_tb
:
6073 tcg_out_goto_tb(s
, op
->args
[0]);
6075 case INDEX_op_dup2_vec
:
6076 if (tcg_reg_alloc_dup2(s
, op
)) {
6081 /* Sanity check that we've not introduced any unhandled opcodes. */
6082 tcg_debug_assert(tcg_op_supported(opc
));
6083 /* Note: in order to speed up the code, it would be much
6084 faster to have specialized register allocator functions for
6085 some common argument patterns */
6086 tcg_reg_alloc_op(s
, op
);
6089 /* Test for (pending) buffer overflow. The assumption is that any
6090 one operation beginning below the high water mark cannot overrun
6091 the buffer completely. Thus we can test for overflow after
6092 generating code without having to check during generation. */
6093 if (unlikely((void *)s
->code_ptr
> s
->code_gen_highwater
)) {
6096 /* Test for TB overflow, as seen by gen_insn_end_off. */
6097 if (unlikely(tcg_current_code_size(s
) > UINT16_MAX
)) {
6101 tcg_debug_assert(num_insns
+ 1 == s
->gen_tb
->icount
);
6102 s
->gen_insn_end_off
[num_insns
] = tcg_current_code_size(s
);
6104 /* Generate TB finalization at the end of block */
6105 #ifdef TCG_TARGET_NEED_LDST_LABELS
6106 i
= tcg_out_ldst_finalize(s
);
6111 #ifdef TCG_TARGET_NEED_POOL_LABELS
6112 i
= tcg_out_pool_finalize(s
);
6117 if (!tcg_resolve_relocs(s
)) {
6121 #ifndef CONFIG_TCG_INTERPRETER
6122 /* flush instruction cache */
6123 flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s
->code_buf
),
6124 (uintptr_t)s
->code_buf
,
6125 tcg_ptr_byte_diff(s
->code_ptr
, s
->code_buf
));
6128 return tcg_current_code_size(s
);
6131 void tcg_dump_info(GString
*buf
)
6133 g_string_append_printf(buf
, "[TCG profiler not compiled]\n");
6136 #ifdef ELF_HOST_MACHINE
6137 /* In order to use this feature, the backend needs to do three things:
6139 (1) Define ELF_HOST_MACHINE to indicate both what value to
6140 put into the ELF image and to indicate support for the feature.
6142 (2) Define tcg_register_jit. This should create a buffer containing
6143 the contents of a .debug_frame section that describes the post-
6144 prologue unwind info for the tcg machine.
6146 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
6149 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
6156 struct jit_code_entry
{
6157 struct jit_code_entry
*next_entry
;
6158 struct jit_code_entry
*prev_entry
;
6159 const void *symfile_addr
;
6160 uint64_t symfile_size
;
6163 struct jit_descriptor
{
6165 uint32_t action_flag
;
6166 struct jit_code_entry
*relevant_entry
;
6167 struct jit_code_entry
*first_entry
;
6170 void __jit_debug_register_code(void) __attribute__((noinline
));
6171 void __jit_debug_register_code(void)
6176 /* Must statically initialize the version, because GDB may check
6177 the version before we can set it. */
6178 struct jit_descriptor __jit_debug_descriptor
= { 1, 0, 0, 0 };
6180 /* End GDB interface. */
6182 static int find_string(const char *strtab
, const char *str
)
6184 const char *p
= strtab
+ 1;
6187 if (strcmp(p
, str
) == 0) {
6194 static void tcg_register_jit_int(const void *buf_ptr
, size_t buf_size
,
6195 const void *debug_frame
,
6196 size_t debug_frame_size
)
6198 struct __attribute__((packed
)) DebugInfo
{
6205 uintptr_t cu_low_pc
;
6206 uintptr_t cu_high_pc
;
6209 uintptr_t fn_low_pc
;
6210 uintptr_t fn_high_pc
;
6219 struct DebugInfo di
;
6224 struct ElfImage
*img
;
6226 static const struct ElfImage img_template
= {
6228 .e_ident
[EI_MAG0
] = ELFMAG0
,
6229 .e_ident
[EI_MAG1
] = ELFMAG1
,
6230 .e_ident
[EI_MAG2
] = ELFMAG2
,
6231 .e_ident
[EI_MAG3
] = ELFMAG3
,
6232 .e_ident
[EI_CLASS
] = ELF_CLASS
,
6233 .e_ident
[EI_DATA
] = ELF_DATA
,
6234 .e_ident
[EI_VERSION
] = EV_CURRENT
,
6236 .e_machine
= ELF_HOST_MACHINE
,
6237 .e_version
= EV_CURRENT
,
6238 .e_phoff
= offsetof(struct ElfImage
, phdr
),
6239 .e_shoff
= offsetof(struct ElfImage
, shdr
),
6240 .e_ehsize
= sizeof(ElfW(Shdr
)),
6241 .e_phentsize
= sizeof(ElfW(Phdr
)),
6243 .e_shentsize
= sizeof(ElfW(Shdr
)),
6244 .e_shnum
= ARRAY_SIZE(img
->shdr
),
6245 .e_shstrndx
= ARRAY_SIZE(img
->shdr
) - 1,
6246 #ifdef ELF_HOST_FLAGS
6247 .e_flags
= ELF_HOST_FLAGS
,
6250 .e_ident
[EI_OSABI
] = ELF_OSABI
,
6258 [0] = { .sh_type
= SHT_NULL
},
6259 /* Trick: The contents of code_gen_buffer are not present in
6260 this fake ELF file; that got allocated elsewhere. Therefore
6261 we mark .text as SHT_NOBITS (similar to .bss) so that readers
6262 will not look for contents. We can record any address. */
6264 .sh_type
= SHT_NOBITS
,
6265 .sh_flags
= SHF_EXECINSTR
| SHF_ALLOC
,
6267 [2] = { /* .debug_info */
6268 .sh_type
= SHT_PROGBITS
,
6269 .sh_offset
= offsetof(struct ElfImage
, di
),
6270 .sh_size
= sizeof(struct DebugInfo
),
6272 [3] = { /* .debug_abbrev */
6273 .sh_type
= SHT_PROGBITS
,
6274 .sh_offset
= offsetof(struct ElfImage
, da
),
6275 .sh_size
= sizeof(img
->da
),
6277 [4] = { /* .debug_frame */
6278 .sh_type
= SHT_PROGBITS
,
6279 .sh_offset
= sizeof(struct ElfImage
),
6281 [5] = { /* .symtab */
6282 .sh_type
= SHT_SYMTAB
,
6283 .sh_offset
= offsetof(struct ElfImage
, sym
),
6284 .sh_size
= sizeof(img
->sym
),
6286 .sh_link
= ARRAY_SIZE(img
->shdr
) - 1,
6287 .sh_entsize
= sizeof(ElfW(Sym
)),
6289 [6] = { /* .strtab */
6290 .sh_type
= SHT_STRTAB
,
6291 .sh_offset
= offsetof(struct ElfImage
, str
),
6292 .sh_size
= sizeof(img
->str
),
6296 [1] = { /* code_gen_buffer */
6297 .st_info
= ELF_ST_INFO(STB_GLOBAL
, STT_FUNC
),
6302 .len
= sizeof(struct DebugInfo
) - 4,
6304 .ptr_size
= sizeof(void *),
6306 .cu_lang
= 0x8001, /* DW_LANG_Mips_Assembler */
6308 .fn_name
= "code_gen_buffer"
6311 1, /* abbrev number (the cu) */
6312 0x11, 1, /* DW_TAG_compile_unit, has children */
6313 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
6314 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
6315 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
6316 0, 0, /* end of abbrev */
6317 2, /* abbrev number (the fn) */
6318 0x2e, 0, /* DW_TAG_subprogram, no children */
6319 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
6320 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
6321 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
6322 0, 0, /* end of abbrev */
6323 0 /* no more abbrev */
6325 .str
= "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
6326 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
6329 /* We only need a single jit entry; statically allocate it. */
6330 static struct jit_code_entry one_entry
;
6332 uintptr_t buf
= (uintptr_t)buf_ptr
;
6333 size_t img_size
= sizeof(struct ElfImage
) + debug_frame_size
;
6334 DebugFrameHeader
*dfh
;
6336 img
= g_malloc(img_size
);
6337 *img
= img_template
;
6339 img
->phdr
.p_vaddr
= buf
;
6340 img
->phdr
.p_paddr
= buf
;
6341 img
->phdr
.p_memsz
= buf_size
;
6343 img
->shdr
[1].sh_name
= find_string(img
->str
, ".text");
6344 img
->shdr
[1].sh_addr
= buf
;
6345 img
->shdr
[1].sh_size
= buf_size
;
6347 img
->shdr
[2].sh_name
= find_string(img
->str
, ".debug_info");
6348 img
->shdr
[3].sh_name
= find_string(img
->str
, ".debug_abbrev");
6350 img
->shdr
[4].sh_name
= find_string(img
->str
, ".debug_frame");
6351 img
->shdr
[4].sh_size
= debug_frame_size
;
6353 img
->shdr
[5].sh_name
= find_string(img
->str
, ".symtab");
6354 img
->shdr
[6].sh_name
= find_string(img
->str
, ".strtab");
6356 img
->sym
[1].st_name
= find_string(img
->str
, "code_gen_buffer");
6357 img
->sym
[1].st_value
= buf
;
6358 img
->sym
[1].st_size
= buf_size
;
6360 img
->di
.cu_low_pc
= buf
;
6361 img
->di
.cu_high_pc
= buf
+ buf_size
;
6362 img
->di
.fn_low_pc
= buf
;
6363 img
->di
.fn_high_pc
= buf
+ buf_size
;
6365 dfh
= (DebugFrameHeader
*)(img
+ 1);
6366 memcpy(dfh
, debug_frame
, debug_frame_size
);
6367 dfh
->fde
.func_start
= buf
;
6368 dfh
->fde
.func_len
= buf_size
;
6371 /* Enable this block to be able to debug the ELF image file creation.
6372 One can use readelf, objdump, or other inspection utilities. */
6374 g_autofree
char *jit
= g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
6375 FILE *f
= fopen(jit
, "w+b");
6377 if (fwrite(img
, img_size
, 1, f
) != img_size
) {
6378 /* Avoid stupid unused return value warning for fwrite. */
6385 one_entry
.symfile_addr
= img
;
6386 one_entry
.symfile_size
= img_size
;
6388 __jit_debug_descriptor
.action_flag
= JIT_REGISTER_FN
;
6389 __jit_debug_descriptor
.relevant_entry
= &one_entry
;
6390 __jit_debug_descriptor
.first_entry
= &one_entry
;
6391 __jit_debug_register_code();
6394 /* No support for the feature. Provide the entry point expected by exec.c,
6395 and implement the internal function we declared earlier. */
6397 static void tcg_register_jit_int(const void *buf
, size_t size
,
6398 const void *debug_frame
,
6399 size_t debug_frame_size
)
6403 void tcg_register_jit(const void *buf
, size_t buf_size
)
6406 #endif /* ELF_HOST_MACHINE */
6408 #if !TCG_TARGET_MAYBE_vec
6409 void tcg_expand_vec_op(TCGOpcode o
, TCGType t
, unsigned e
, TCGArg a0
, ...)
6411 g_assert_not_reached();