/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/translation-block.h"
#include "exec/tlb-common.h"
#include "tcg/startup.h"
#include "tcg/tcg-op-common.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "accel/tcg/perf.h"
#ifdef CONFIG_USER_ONLY
#include "exec/user/guest-base.h"
#endif
/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

typedef struct TCGLabelQemuLdst {
    bool is_ld;             /* qemu_ld: true, qemu_st: false */
    MemOpIdx oi;
    TCGType type;           /* result type of a load */
    TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
    TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
    TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
    TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
    const tcg_insn_unit *raddr;   /* addr of the next IR of qemu_ld/st IR */
    tcg_insn_unit *label_ptr[2];  /* label pointers to be updated */
    QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
} TCGLabelQemuLdst;

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_tb_start(TCGContext *s);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

#ifndef CONFIG_USER_ONLY
#define guest_base  ({ qemu_build_not_reached(); (uintptr_t)0; })
#endif
typedef struct TCGLdstHelperParam {
    TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg);
    unsigned ntmp;
    int tmp[3];
} TCGLdstHelperParam;

static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *l,
                                  bool load_sign, const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));

static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = {
    [MO_UB] = helper_ldub_mmu,
    [MO_SB] = helper_ldsb_mmu,
    [MO_UW] = helper_lduw_mmu,
    [MO_SW] = helper_ldsw_mmu,
    [MO_UL] = helper_ldul_mmu,
    [MO_UQ] = helper_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_ldsl_mmu,
    [MO_128] = helper_ld16_mmu,
#endif
};

static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
    [MO_8]  = helper_stb_mmu,
    [MO_16] = helper_stw_mmu,
    [MO_32] = helper_stl_mmu,
    [MO_64] = helper_stq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_128] = helper_st16_mmu,
#endif
};

typedef struct {
    MemOp atom;   /* lg2 bits of atomicity required */
    MemOp align;  /* lg2 bits of alignment to use */
} TCGAtomAlign;

static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
    __attribute__((unused));

#ifdef CONFIG_USER_ONLY
bool tcg_use_softmmu;
#endif
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env tcg_env;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
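
/*
 * Example: tcg_insn_unit is the host's minimum instruction granule, so
 * the emitters above scale their pointer arithmetic by
 * TCG_TARGET_INSN_UNIT_SIZE.  On a byte-oriented host (unit size 1),
 * tcg_out32() memcpys four bytes and advances code_ptr by four units;
 * on a host with fixed 32-bit instructions (unit size 4), it stores a
 * single unit directly with "*s->code_ptr++ = v".
 */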
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->branches);
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}

static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}

static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}

static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}

static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
{
    /*
     * Return the read-execute version of the pointer, for the benefit
     * of any pc-relative addressing mode.
     */
    return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}

static int __attribute__((unused))
tlb_mask_table_ofs(TCGContext *s, int which)
{
    return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
            sizeof(CPUNegativeOffsetState));
}

/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}
/*
 * Used by tcg_out_movext{1,2} to hold the arguments for tcg_out_movext.
 * By the time we arrive at tcg_out_movext1, @dst is always a TCGReg.
 *
 * However, tcg_out_helper_load_slots reuses this field to hold an
 * argument slot number (which may designate an argument register or an
 * argument stack slot), converting to TCGReg once all arguments that
 * are destined for the stack are processed.
 */
typedef struct TCGMovExtend {
    unsigned dst;
    TCGReg src;
    TCGType dst_type;
    TCGType src_type;
    MemOp src_ext;
} TCGMovExtend;

/**
 * tcg_out_movext -- move and extend
 * @s: tcg context
 * @dst_type: integral type for destination
 * @dst: destination register
 * @src_type: integral type for source
 * @src_ext: extension to apply to source
 * @src: source register
 *
 * Move or extend @src into @dst, depending on @src_ext and the types.
 */
static void tcg_out_movext(TCGContext *s, TCGType dst_type, TCGReg dst,
                           TCGType src_type, MemOp src_ext, TCGReg src)
{
    switch (src_ext) {
    case MO_UB:
        tcg_out_ext8u(s, dst, src);
        break;
    case MO_SB:
        tcg_out_ext8s(s, dst_type, dst, src);
        break;
    case MO_UW:
        tcg_out_ext16u(s, dst, src);
        break;
    case MO_SW:
        tcg_out_ext16s(s, dst_type, dst, src);
        break;
    case MO_UL:
    case MO_SL:
        if (dst_type == TCG_TYPE_I32) {
            if (src_type == TCG_TYPE_I32) {
                tcg_out_mov(s, TCG_TYPE_I32, dst, src);
            } else {
                tcg_out_extrl_i64_i32(s, dst, src);
            }
        } else if (src_type == TCG_TYPE_I32) {
            if (src_ext & MO_SIGN) {
                tcg_out_exts_i32_i64(s, dst, src);
            } else {
                tcg_out_extu_i32_i64(s, dst, src);
            }
        } else {
            if (src_ext & MO_SIGN) {
                tcg_out_ext32s(s, dst, src);
            } else {
                tcg_out_ext32u(s, dst, src);
            }
        }
        break;
    case MO_UQ:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        if (dst_type == TCG_TYPE_I32) {
            tcg_out_extrl_i64_i32(s, dst, src);
        } else {
            tcg_out_mov(s, TCG_TYPE_I64, dst, src);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
/* Minor variations on a theme, using a structure. */
static void tcg_out_movext1_new_src(TCGContext *s, const TCGMovExtend *i,
                                    TCGReg src)
{
    tcg_out_movext(s, i->dst_type, i->dst, i->src_type, i->src_ext, src);
}

static void tcg_out_movext1(TCGContext *s, const TCGMovExtend *i)
{
    tcg_out_movext1_new_src(s, i, i->src);
}

/**
 * tcg_out_movext2 -- move and extend two pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for both @i1 and @i2, caring for overlap
 * between the sources and destinations.
 */
static void tcg_out_movext2(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;

    if (i1->dst != src2) {
        tcg_out_movext1(s, i1);
        tcg_out_movext1(s, i2);
        return;
    }
    if (i2->dst == src1) {
        TCGType src1_type = i1->src_type;
        TCGType src2_type = i2->src_type;

        if (tcg_out_xchg(s, MAX(src1_type, src2_type), src1, src2)) {
            /* The data is now in the correct registers, now extend. */
            src1 = i2->src;
            src2 = i1->src;
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, src1_type, scratch, src1);
            src1 = scratch;
        }
    }
    tcg_out_movext1_new_src(s, i2, src2);
    tcg_out_movext1_new_src(s, i1, src1);
}
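
/*
 * Worked example: if i1 moves R0 -> R1 while i2 moves R1 -> R0 (a pure
 * swap), then i1->dst == i2->src and i2->dst == i1->src, so the code
 * above first tries tcg_out_xchg(); if the backend cannot exchange the
 * pair, it parks i1's source in @scratch and performs both extended
 * moves from the surviving copies.
 */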
/**
 * tcg_out_movext3 -- move and extend three pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @i3: third move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for all of @i1, @i2 and @i3, caring for overlap
 * between the sources and destinations.
 */
static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, const TCGMovExtend *i3,
                            int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;
    TCGReg src3 = i3->src;

    if (i1->dst != src2 && i1->dst != src3) {
        tcg_out_movext1(s, i1);
        tcg_out_movext2(s, i2, i3, scratch);
        return;
    }
    if (i2->dst != src1 && i2->dst != src3) {
        tcg_out_movext1(s, i2);
        tcg_out_movext2(s, i1, i3, scratch);
        return;
    }
    if (i3->dst != src1 && i3->dst != src2) {
        tcg_out_movext1(s, i3);
        tcg_out_movext2(s, i1, i2, scratch);
        return;
    }

    /*
     * There is a cycle.  Since there are only 3 nodes, the cycle is
     * either "clockwise" or "anti-clockwise", and can be solved with
     * a single scratch or two xchg.
     */
    if (i1->dst == src2 && i2->dst == src3 && i3->dst == src1) {
        /* "Clockwise" */
        if (tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3)) {
            tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i3);
            tcg_out_movext1(s, i2);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else if (i1->dst == src3 && i2->dst == src1 && i3->dst == src2) {
        /* "Anti-clockwise" */
        if (tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2)) {
            tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i2);
            tcg_out_movext1(s, i3);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else {
        g_assert_not_reached();
    }
}
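
/*
 * Worked example of a "clockwise" cycle: i1: R0 -> R1, i2: R1 -> R2,
 * i3: R2 -> R0, so that i1->dst == i2->src, i2->dst == i3->src and
 * i3->dst == i1->src.  Exchanging R1 with R2 and then R0 with R1
 * leaves every payload in its destination register, after which each
 * move degenerates into an in-place extension.
 */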
#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),
#define C_N1O1_I1(O1, O2, I1)           C_PFX3(c_n1o1_i1_, O1, O2, I1),
#define C_N2_I1(O1, O2, I1)             C_PFX3(c_n2_i1_, O1, O2, I1),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;
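
/*
 * Example: each line of tcg-target-con-set.h is one of the macros
 * above, so a hypothetical entry C_O1_I2(r, r, ri) expands here to the
 * enumerator "c_o1_i2_r_r_ri,".  The same header is included again
 * below with the macros redefined, so the identical entry then yields
 * the matching { .args_ct_str = { "r", "r", "ri" } } initializer at
 * the same array index.
 */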
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_N1O1_I1
#undef C_N2_I1
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4

/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },
#define C_N1O1_I1(O1, O2, I1)           { .args_ct_str = { "&" #O1, #O2, #I1 } },
#define C_N2_I1(O1, O2, I1)             { .args_ct_str = { "&" #O1, "&" #O2, #I1 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_N1O1_I1
#undef C_N2_I1
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4

/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)
#define C_N1O1_I1(O1, O2, I1)           C_PFX3(c_n1o1_i1_, O1, O2, I1)
#define C_N2_I1(O1, O2, I1)             C_PFX3(c_n2_i1_, O1, O2, I1)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"

#ifndef CONFIG_TCG_INTERPRETER
/* Validate CPUTLBDescFast placement. */
QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
                        sizeof(CPUNegativeOffsetState))
                  < MIN_TLB_MASK_TABLE_OFS);
#endif
static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In system-mode each caller registers its context in tcg_ctxs[]. Note that in
 * system-mode tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in system-mode keeps code that
 * iterates over the array (e.g. tcg_code_size()) the same for both system/user
 * modes.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;

    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
/*
 * Create TCGHelperInfo structures for "tcg/tcg-ldst.h" functions,
 * akin to what "exec/helper-tcg.h" does with DEF_HELPER_FLAGS_N.
 * We only use these for layout in tcg_out_ld_helper_ret and
 * tcg_out_st_helper_args, and share them between several of
 * the helpers, with the end result that it's easier to build manually.
 */

#if TCG_TARGET_REG_BITS == 32
# define dh_typecode_ttl  dh_typecode_i32
#else
# define dh_typecode_ttl  dh_typecode_i64
#endif

static TCGHelperInfo info_helper_ld32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(ttl, 0)  /* return tcg_target_ulong */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i64, 0)  /* return uint64_t */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i128, 0) /* return Int128 */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* uint32_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i64, 3)  /* uint64_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i128, 3) /* Int128 data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};
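
/*
 * Example of the encoding: dh_typemask(t, n) places a 3-bit dh_typecode
 * at bit position 3 * n, with slot 0 describing the return value and
 * slot n + 1 describing argument n.  For info_helper_ld64_mmu above,
 * the slots read { i64, env, i64, i32, ptr }: one return value plus
 * four arguments, which init_ffi_layout() below recovers by stripping
 * the return slot with "typemask >> 3" and counting the remaining
 * 3-bit groups.
 */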
#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    /*
     * libffi does not support __int128_t, so we have forced Int128
     * to use the structure definition instead of the builtin type.
     */
    static ffi_type *ffi_type_i128_elements[3] = {
        &ffi_type_uint64,
        &ffi_type_uint64,
        NULL
    };
    static ffi_type ffi_type_i128 = {
        .size = 16,
        .alignment = __alignof__(Int128),
        .type = FFI_TYPE_STRUCT,
        .elements = ffi_type_i128_elements,
    };

    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    case dh_typecode_i128:
        return &ffi_type_i128;
    }
    g_assert_not_reached();
}

static ffi_cif *init_ffi_layout(TCGHelperInfo *info)
{
    unsigned typemask = info->typemask;
    struct {
        ffi_cif cif;
        ffi_type *args[];
    } *ca;
    ffi_status status;
    int nargs;

    /* Ignoring the return type, find the last non-zero field. */
    nargs = 32 - clz32(typemask >> 3);
    nargs = DIV_ROUND_UP(nargs, 3);
    assert(nargs <= MAX_CALL_IARGS);

    ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
    ca->cif.rtype = typecode_to_ffi(typemask & 7);
    ca->cif.nargs = nargs;

    if (nargs != 0) {
        ca->cif.arg_types = ca->args;
        for (int j = 0; j < nargs; ++j) {
            int typecode = extract32(typemask, (j + 1) * 3, 3);
            ca->args[j] = typecode_to_ffi(typecode);
        }
    }

    status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                          ca->cif.rtype, ca->cif.arg_types);
    assert(status == FFI_OK);

    return &ca->cif;
}
#define HELPER_INFO_INIT(I)      (&(I)->cif)
#define HELPER_INFO_INIT_VAL(I)  init_ffi_layout(I)
#else
#define HELPER_INFO_INIT(I)      (&(I)->init)
#define HELPER_INFO_INIT_VAL(I)  1
#endif /* CONFIG_TCG_INTERPRETER */

static inline bool arg_slot_reg_p(unsigned arg_slot)
{
    /*
     * Split the sizeof away from the comparison to avoid Werror from
     * "unsigned < 0 is always false", when iarg_regs is empty.
     */
    unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
    return arg_slot < nreg;
}

static inline int arg_slot_stk_ofs(unsigned arg_slot)
{
    unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);

    tcg_debug_assert(stk_slot < max);
    return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
}
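
/*
 * Example (for an assumed host with six integer argument registers,
 * TCG_TARGET_CALL_STACK_OFFSET of 0 and 8-byte tcg_target_long):
 * argument slots 0..5 are registers, while slot 8 maps to stk_slot 2,
 * i.e. byte offset 16 within the outgoing-argument area.
 */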
typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;

static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}

static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}

static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}

static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
    int n = 128 / TCG_TARGET_REG_BITS;

    /* The first subindex carries the pointer. */
    layout_arg_1(cum, info, TCG_CALL_ARG_BY_REF);

    /*
     * The callee is allowed to clobber memory associated with
     * structure pass by-reference.  Therefore we must make copies.
     * Allocate space from "ref_slot", which will be adjusted to
     * follow the parameters on the stack.
     */
    loc[0].ref_slot = cum->ref_slot;

    /*
     * Subsequent words also go into the reference slot, but
     * do not accumulate into the regular arguments.
     */
    for (int i = 1; i < n; ++i) {
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_BY_REF_N,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .ref_slot = cum->ref_slot + i,
        };
    }
    cum->info_in_idx += n - 1;  /* i=0 accounted for in layout_arg_1 */
    cum->ref_slot += n;
}
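
/*
 * Example: on a 64-bit host, n = 128 / TCG_TARGET_REG_BITS = 2, so an
 * Int128 passed by reference consumes one regular argument slot for
 * the pointer plus two "ref_slot" stack words for the callee-clobbered
 * copy; info_in_idx advances by two in total (one slot from
 * layout_arg_1, one TCG_CALL_ARG_BY_REF_N slot from the loop above).
 */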
static void init_call_layout(TCGHelperInfo *info)
{
    int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
    int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned typemask = info->typemask;
    unsigned typecode;
    TCGCumulativeArgs cum = { };

    /*
     * Parse and place any function return value.
     */
    typecode = typemask & 7;
    switch (typecode) {
    case dh_typecode_void:
        info->nr_out = 0;
        break;
    case dh_typecode_i32:
    case dh_typecode_s32:
    case dh_typecode_ptr:
        info->nr_out = 1;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    case dh_typecode_i64:
    case dh_typecode_s64:
        info->nr_out = 64 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_CALL_RET_NORMAL;
        /* Query the last register now to trigger any assert early. */
        tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
        break;
    case dh_typecode_i128:
        info->nr_out = 128 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_TARGET_CALL_RET_I128;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            /* Query the last register now to trigger any assert early. */
            tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
            break;
        case TCG_CALL_RET_BY_VEC:
            /* Query the single register now to trigger any assert early. */
            tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0);
            break;
        case TCG_CALL_RET_BY_REF:
            /*
             * Allocate the first argument to the output.
             * We don't need to store this anywhere, just make it
             * unavailable for use in the input loop below.
             */
            cum.arg_slot = 1;
            break;
        default:
            qemu_build_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Parse and place function arguments.
     */
    for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
        TCGCallArgumentKind kind;
        TCGType type;

        typecode = typemask & 7;
        switch (typecode) {
        case dh_typecode_i32:
        case dh_typecode_s32:
            type = TCG_TYPE_I32;
            break;
        case dh_typecode_i64:
        case dh_typecode_s64:
            type = TCG_TYPE_I64;
            break;
        case dh_typecode_ptr:
            type = TCG_TYPE_PTR;
            break;
        case dh_typecode_i128:
            type = TCG_TYPE_I128;
            break;
        default:
            g_assert_not_reached();
        }

        switch (type) {
        case TCG_TYPE_I32:
            switch (TCG_TARGET_CALL_ARG_I32) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                break;
            case TCG_CALL_ARG_EXTEND:
                kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
                layout_arg_1(&cum, info, kind);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I64:
            switch (TCG_TARGET_CALL_ARG_I64) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                if (TCG_TARGET_REG_BITS == 32) {
                    layout_arg_normal_n(&cum, info, 2);
                } else {
                    layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                }
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I128:
            switch (TCG_TARGET_CALL_ARG_I128) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_normal_n(&cum, info, 128 / TCG_TARGET_REG_BITS);
                break;
            case TCG_CALL_ARG_BY_REF:
                layout_arg_by_ref(&cum, info);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    info->nr_in = cum.info_in_idx;

    /* Validate that we didn't overrun the input array. */
    assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
    /* Validate the backend has enough argument space. */
    assert(cum.arg_slot <= max_reg_slots + max_stk_slots);

    /*
     * Relocate the "ref_slot" area to the end of the parameters.
     * Minimizing this stack offset helps code size for x86,
     * which has a signed 8-bit offset encoding.
     */
    if (cum.ref_slot != 0) {
        int ref_base = 0;

        if (cum.arg_slot > max_reg_slots) {
            int align = __alignof(Int128) / sizeof(tcg_target_long);

            ref_base = cum.arg_slot - max_reg_slots;
            if (align > 1) {
                ref_base = ROUND_UP(ref_base, align);
            }
        }
        assert(ref_base + cum.ref_slot <= max_stk_slots);
        ref_base += max_reg_slots;

        if (ref_base != 0) {
            for (int i = cum.info_in_idx - 1; i >= 0; --i) {
                TCGCallArgumentLoc *loc = &info->in[i];
                switch (loc->kind) {
                case TCG_CALL_ARG_BY_REF:
                case TCG_CALL_ARG_BY_REF_N:
                    loc->ref_slot += ref_base;
                    break;
                default:
                    break;
                }
            }
        }
    }
}
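
/*
 * Example (assuming a 64-bit host that passes the first few arguments
 * in registers): for info_helper_st64_mmu the return slot is void, so
 * nr_out = 0, and the argument loop lays out env, addr, data, oi and
 * ra as five consecutive TCG_CALL_ARG_NORMAL slots 0..4.
 */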
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);

static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    init_call_layout(&info_helper_ld32_mmu);
    init_call_layout(&info_helper_ld64_mmu);
    init_call_layout(&info_helper_ld128_mmu);
    init_call_layout(&info_helper_st32_mmu);
    init_call_layout(&info_helper_st64_mmu);
    init_call_layout(&info_helper_st128_mmu);

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for the
     * reasoning behind this.
     * In system-mode we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    tcg_env = temp_tcgv_ptr(ts);
}

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
void tcg_prologue_init(void)
{
    TCGContext *s = tcg_ctx;
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue. */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry. */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);
    perf_report_prologue(s->code_gen_ptr, prologue_size);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
            if (s->data_gen_ptr) {
                size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
                size_t data_size = prologue_size - code_size;
                size_t i;

                disas(logfile, s->code_gen_ptr, code_size);

                for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint64_t *)(s->data_gen_ptr + i));
                    } else {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08x\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint32_t *)(s->data_gen_ptr + i));
                    }
                }
            } else {
                disas(logfile, s->code_gen_ptr, prologue_size);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif

    tcg_region_prologue_set(s);
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality. */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);

    tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
                     s->addr_type == TCG_TYPE_I64);

    tcg_debug_assert(s->insn_start_words > 0);
}
static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
static TCGTemp *tcg_global_mem_new_internal(TCGv_ptr base, intptr_t offset,
                                            const char *name, TCGType type)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0;

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers. */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + 4;
        ts2->temp_subindex = 1;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}

TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t off, const char *name)
{
    TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_I32);
    return temp_tcgv_i32(ts);
}

TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t off, const char *name)
{
    TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_I64);
    return temp_tcgv_i64(ts);
}

TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t off, const char *name)
{
    TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_PTR);
    return temp_tcgv_ptr(ts);
}
static TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int n;

    if (kind == TEMP_EBB) {
        int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);

        if (idx < TCG_MAX_TEMPS) {
            /* There is already an available temp with the right type. */
            clear_bit(idx, s->free_temps[type].l);

            ts = &s->temps[idx];
            ts->temp_allocated = 1;
            tcg_debug_assert(ts->base_type == type);
            tcg_debug_assert(ts->kind == kind);
            return ts;
        }
    } else {
        tcg_debug_assert(kind == TEMP_TB);
    }

    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        n = 1;
        break;
    case TCG_TYPE_I64:
        n = 64 / TCG_TARGET_REG_BITS;
        break;
    case TCG_TYPE_I128:
        n = 128 / TCG_TARGET_REG_BITS;
        break;
    default:
        g_assert_not_reached();
    }

    ts = tcg_temp_alloc(s);
    ts->base_type = type;
    ts->temp_allocated = 1;
    ts->kind = kind;

    if (n == 1) {
        ts->type = type;
    } else {
        ts->type = TCG_TYPE_REG;

        for (int i = 1; i < n; ++i) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + i);
            ts2->base_type = type;
            ts2->type = TCG_TYPE_REG;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = i;
            ts2->kind = kind;
        }
    }
    return ts;
}
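
/*
 * Note: in the multi-word cases above, e.g. TCG_TYPE_I128 on a 64-bit
 * host, n = 2 and the base temp is followed by adjacent temps carrying
 * temp_subindex 1..n-1; each piece has type TCG_TYPE_REG while
 * base_type records the full wide type.
 */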
TCGv_i32 tcg_temp_new_i32(void)
{
    return temp_tcgv_i32(tcg_temp_new_internal(TCG_TYPE_I32, TEMP_TB));
}

TCGv_i32 tcg_temp_ebb_new_i32(void)
{
    return temp_tcgv_i32(tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB));
}

TCGv_i64 tcg_temp_new_i64(void)
{
    return temp_tcgv_i64(tcg_temp_new_internal(TCG_TYPE_I64, TEMP_TB));
}

TCGv_i64 tcg_temp_ebb_new_i64(void)
{
    return temp_tcgv_i64(tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB));
}

TCGv_ptr tcg_temp_new_ptr(void)
{
    return temp_tcgv_ptr(tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_TB));
}

TCGv_ptr tcg_temp_ebb_new_ptr(void)
{
    return temp_tcgv_ptr(tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB));
}

TCGv_i128 tcg_temp_new_i128(void)
{
    return temp_tcgv_i128(tcg_temp_new_internal(TCG_TYPE_I128, TEMP_TB));
}

TCGv_i128 tcg_temp_ebb_new_i128(void)
{
    return temp_tcgv_i128(tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB));
}

TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp. */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
    return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;

    switch (ts->kind) {
    case TEMP_CONST:
    case TEMP_TB:
        /* Silently ignore free. */
        break;
    case TEMP_EBB:
        tcg_debug_assert(ts->temp_allocated != 0);
        ts->temp_allocated = 0;
        set_bit(temp_idx(ts), s->free_temps[ts->base_type].l);
        break;
    default:
        /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */
        g_assert_not_reached();
    }
}

void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(tcgv_i32_temp(arg));
}

void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(tcgv_i64_temp(arg));
}

void tcg_temp_free_i128(TCGv_i128 arg)
{
    tcg_temp_free_internal(tcgv_i128_temp(arg));
}

void tcg_temp_free_ptr(TCGv_ptr arg)
{
    tcg_temp_free_internal(tcgv_ptr_temp(arg));
}

void tcg_temp_free_vec(TCGv_vec arg)
{
    tcg_temp_free_internal(tcgv_vec_temp(arg));
}
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        int64_t *val_ptr;

        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + 1);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;

            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;

            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works.  Actual uses will
             * truncate the value to the low part.
             */
            ts[HOST_BIG_ENDIAN].val = val;
            ts[!HOST_BIG_ENDIAN].val = val >> 32;
            val_ptr = &ts[HOST_BIG_ENDIAN].val;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
            val_ptr = &ts->val;
        }
        g_hash_table_insert(h, val_ptr, ts);
    }

    return ts;
}
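
/*
 * Example: on a 32-bit little-endian host, the TCG_TYPE_I64 constant
 * 0x1122334455667788 is split across two adjacent TEMP_CONST temps:
 * ts[0].val keeps the full 64-bit value (so the hash lookup above
 * still matches), while ts[1].val holds the high half 0x11223344;
 * uses of the low part simply truncate.
 */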
TCGv_i32 tcg_constant_i32(int32_t val)
{
    return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
}

TCGv_i64 tcg_constant_i64(int64_t val)
{
    return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
}

TCGv_ptr tcg_constant_ptr_int(intptr_t val)
{
    return temp_tcgv_ptr(tcg_constant_internal(TCG_TYPE_PTR, val));
}

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}
#ifdef CONFIG_DEBUG_TCG
size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;

    assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v - offsetof(TCGContext, temps);

    assert(o < sizeof(TCGTemp) * tcg_ctx->nb_temps);
    assert(o % sizeof(TCGTemp) == 0);

    return (void *)tcg_ctx + (uintptr_t)v;
}
#endif /* CONFIG_DEBUG_TCG */
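
/*
 * Note: a TCGv_i32 handle is not a real pointer but the byte offset of
 * its TCGTemp within TCGContext, i.e. offsetof(TCGContext, temps) plus
 * idx * sizeof(TCGTemp); the two asserts above validate exactly that
 * encoding before rebasing it on tcg_ctx.
 */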
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_goto_ptr:
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return true;

    case INDEX_op_qemu_st8_a32_i32:
    case INDEX_op_qemu_st8_a64_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_qemu_ld_a32_i128:
    case INDEX_op_qemu_ld_a64_i128:
    case INDEX_op_qemu_st_a32_i128:
    case INDEX_op_qemu_st_a64_i128:
        return TCG_TARGET_HAS_qemu_ldst_i128;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_movcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_negsetcond_i32:
        return TCG_TARGET_HAS_negsetcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_movcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_negsetcond_i64:
        return TCG_TARGET_HAS_negsetcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
2119 return TCG_TARGET_HAS_deposit_i64;
2120 case INDEX_op_extract_i64:
2121 return TCG_TARGET_HAS_extract_i64;
2122 case INDEX_op_sextract_i64:
2123 return TCG_TARGET_HAS_sextract_i64;
2124 case INDEX_op_extract2_i64:
2125 return TCG_TARGET_HAS_extract2_i64;
2126 case INDEX_op_extrl_i64_i32:
2127 case INDEX_op_extrh_i64_i32:
2128 return TCG_TARGET_HAS_extr_i64_i32;
2129 case INDEX_op_ext8s_i64:
2130 return TCG_TARGET_HAS_ext8s_i64;
2131 case INDEX_op_ext16s_i64:
2132 return TCG_TARGET_HAS_ext16s_i64;
2133 case INDEX_op_ext32s_i64:
2134 return TCG_TARGET_HAS_ext32s_i64;
2135 case INDEX_op_ext8u_i64:
2136 return TCG_TARGET_HAS_ext8u_i64;
2137 case INDEX_op_ext16u_i64:
2138 return TCG_TARGET_HAS_ext16u_i64;
2139 case INDEX_op_ext32u_i64:
2140 return TCG_TARGET_HAS_ext32u_i64;
2141 case INDEX_op_bswap16_i64:
2142 return TCG_TARGET_HAS_bswap16_i64;
2143 case INDEX_op_bswap32_i64:
2144 return TCG_TARGET_HAS_bswap32_i64;
2145 case INDEX_op_bswap64_i64:
2146 return TCG_TARGET_HAS_bswap64_i64;
2147 case INDEX_op_not_i64:
2148 return TCG_TARGET_HAS_not_i64;
2149 case INDEX_op_andc_i64:
2150 return TCG_TARGET_HAS_andc_i64;
2151 case INDEX_op_orc_i64:
2152 return TCG_TARGET_HAS_orc_i64;
2153 case INDEX_op_eqv_i64:
2154 return TCG_TARGET_HAS_eqv_i64;
2155 case INDEX_op_nand_i64:
2156 return TCG_TARGET_HAS_nand_i64;
2157 case INDEX_op_nor_i64:
2158 return TCG_TARGET_HAS_nor_i64;
2159 case INDEX_op_clz_i64:
2160 return TCG_TARGET_HAS_clz_i64;
2161 case INDEX_op_ctz_i64:
2162 return TCG_TARGET_HAS_ctz_i64;
2163 case INDEX_op_ctpop_i64:
2164 return TCG_TARGET_HAS_ctpop_i64;
2165 case INDEX_op_add2_i64:
2166 return TCG_TARGET_HAS_add2_i64;
2167 case INDEX_op_sub2_i64:
2168 return TCG_TARGET_HAS_sub2_i64;
2169 case INDEX_op_mulu2_i64:
2170 return TCG_TARGET_HAS_mulu2_i64;
2171 case INDEX_op_muls2_i64:
2172 return TCG_TARGET_HAS_muls2_i64;
2173 case INDEX_op_muluh_i64:
2174 return TCG_TARGET_HAS_muluh_i64;
2175 case INDEX_op_mulsh_i64:
2176 return TCG_TARGET_HAS_mulsh_i64;
2178 case INDEX_op_mov_vec:
2179 case INDEX_op_dup_vec:
2180 case INDEX_op_dupm_vec:
2181 case INDEX_op_ld_vec:
2182 case INDEX_op_st_vec:
2183 case INDEX_op_add_vec:
2184 case INDEX_op_sub_vec:
2185 case INDEX_op_and_vec:
2186 case INDEX_op_or_vec:
2187 case INDEX_op_xor_vec:
2188 case INDEX_op_cmp_vec:
2189 return have_vec;
2190 case INDEX_op_dup2_vec:
2191 return have_vec && TCG_TARGET_REG_BITS == 32;
2192 case INDEX_op_not_vec:
2193 return have_vec && TCG_TARGET_HAS_not_vec;
2194 case INDEX_op_neg_vec:
2195 return have_vec && TCG_TARGET_HAS_neg_vec;
2196 case INDEX_op_abs_vec:
2197 return have_vec && TCG_TARGET_HAS_abs_vec;
2198 case INDEX_op_andc_vec:
2199 return have_vec && TCG_TARGET_HAS_andc_vec;
2200 case INDEX_op_orc_vec:
2201 return have_vec && TCG_TARGET_HAS_orc_vec;
2202 case INDEX_op_nand_vec:
2203 return have_vec && TCG_TARGET_HAS_nand_vec;
2204 case INDEX_op_nor_vec:
2205 return have_vec && TCG_TARGET_HAS_nor_vec;
2206 case INDEX_op_eqv_vec:
2207 return have_vec && TCG_TARGET_HAS_eqv_vec;
2208 case INDEX_op_mul_vec:
2209 return have_vec && TCG_TARGET_HAS_mul_vec;
2210 case INDEX_op_shli_vec:
2211 case INDEX_op_shri_vec:
2212 case INDEX_op_sari_vec:
2213 return have_vec && TCG_TARGET_HAS_shi_vec;
2214 case INDEX_op_shls_vec:
2215 case INDEX_op_shrs_vec:
2216 case INDEX_op_sars_vec:
2217 return have_vec && TCG_TARGET_HAS_shs_vec;
2218 case INDEX_op_shlv_vec:
2219 case INDEX_op_shrv_vec:
2220 case INDEX_op_sarv_vec:
2221 return have_vec && TCG_TARGET_HAS_shv_vec;
2222 case INDEX_op_rotli_vec:
2223 return have_vec && TCG_TARGET_HAS_roti_vec;
2224 case INDEX_op_rotls_vec:
2225 return have_vec && TCG_TARGET_HAS_rots_vec;
2226 case INDEX_op_rotlv_vec:
2227 case INDEX_op_rotrv_vec:
2228 return have_vec && TCG_TARGET_HAS_rotv_vec;
2229 case INDEX_op_ssadd_vec:
2230 case INDEX_op_usadd_vec:
2231 case INDEX_op_sssub_vec:
2232 case INDEX_op_ussub_vec:
2233 return have_vec && TCG_TARGET_HAS_sat_vec;
2234 case INDEX_op_smin_vec:
2235 case INDEX_op_umin_vec:
2236 case INDEX_op_smax_vec:
2237 case INDEX_op_umax_vec:
2238 return have_vec && TCG_TARGET_HAS_minmax_vec;
2239 case INDEX_op_bitsel_vec:
2240 return have_vec && TCG_TARGET_HAS_bitsel_vec;
2241 case INDEX_op_cmpsel_vec:
2242 return have_vec && TCG_TARGET_HAS_cmpsel_vec;
2244 default:
2245 tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
2246 return true;
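/*
 * Usage sketch (hypothetical expander, not from tcg.c): callers can
 * test tcg_op_supported() at expansion time and fall back to an
 * equivalent sequence when the host lacks an optional opcode.  The
 * direct tcg_gen_op3_i32() emission is assumed visible here; inside
 * QEMU, tcg_gen_nand_i32() makes this same choice at compile time via
 * TCG_TARGET_HAS_nand_i32.
 */
static void gen_nand_i32_example(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
{
    if (tcg_op_supported(INDEX_op_nand_i32)) {
        /* The host provides nand directly. */
        tcg_gen_op3_i32(INDEX_op_nand_i32, ret, a, b);
    } else {
        /* Fallback: nand == not(and). */
        tcg_gen_and_i32(ret, a, b);
        tcg_gen_not_i32(ret, ret);
    }
}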
2250 static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
2252 static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args)
2254 TCGv_i64 extend_free[MAX_CALL_IARGS];
2255 int n_extend = 0;
2256 TCGOp *op;
2257 int i, n, pi = 0, total_args;
2259 if (unlikely(g_once_init_enter(HELPER_INFO_INIT(info)))) {
2260 init_call_layout(info);
2261 g_once_init_leave(HELPER_INFO_INIT(info), HELPER_INFO_INIT_VAL(info));
2264 total_args = info->nr_out + info->nr_in + 2;
2265 op = tcg_op_alloc(INDEX_op_call, total_args);
2267 #ifdef CONFIG_PLUGIN
2268 /* Flag helpers that may affect guest state */
2269 if (tcg_ctx->plugin_insn &&
2270 !(info->flags & TCG_CALL_PLUGIN) &&
2271 !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
2272 tcg_ctx->plugin_insn->calls_helpers = true;
2274 #endif
2276 TCGOP_CALLO(op) = n = info->nr_out;
2277 switch (n) {
2278 case 0:
2279 tcg_debug_assert(ret == NULL);
2280 break;
2281 case 1:
2282 tcg_debug_assert(ret != NULL);
2283 op->args[pi++] = temp_arg(ret);
2284 break;
2285 case 2:
2286 case 4:
2287 tcg_debug_assert(ret != NULL);
2288 tcg_debug_assert(ret->base_type == ret->type + ctz32(n));
2289 tcg_debug_assert(ret->temp_subindex == 0);
2290 for (i = 0; i < n; ++i) {
2291 op->args[pi++] = temp_arg(ret + i);
2293 break;
2294 default:
2295 g_assert_not_reached();
2298 TCGOP_CALLI(op) = n = info->nr_in;
2299 for (i = 0; i < n; i++) {
2300 const TCGCallArgumentLoc *loc = &info->in[i];
2301 TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;
2303 switch (loc->kind) {
2304 case TCG_CALL_ARG_NORMAL:
2305 case TCG_CALL_ARG_BY_REF:
2306 case TCG_CALL_ARG_BY_REF_N:
2307 op->args[pi++] = temp_arg(ts);
2308 break;
2310 case TCG_CALL_ARG_EXTEND_U:
2311 case TCG_CALL_ARG_EXTEND_S:
2313 TCGv_i64 temp = tcg_temp_ebb_new_i64();
2314 TCGv_i32 orig = temp_tcgv_i32(ts);
2316 if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
2317 tcg_gen_ext_i32_i64(temp, orig);
2318 } else {
2319 tcg_gen_extu_i32_i64(temp, orig);
2321 op->args[pi++] = tcgv_i64_arg(temp);
2322 extend_free[n_extend++] = temp;
2324 break;
2326 default:
2327 g_assert_not_reached();
2330 op->args[pi++] = (uintptr_t)info->func;
2331 op->args[pi++] = (uintptr_t)info;
2332 tcg_debug_assert(pi == total_args);
2334 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2336 tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
2337 for (i = 0; i < n_extend; ++i) {
2338 tcg_temp_free_i64(extend_free[i]);
2342 void tcg_gen_call0(TCGHelperInfo *info, TCGTemp *ret)
2344 tcg_gen_callN(info, ret, NULL);
2347 void tcg_gen_call1(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1)
2349 tcg_gen_callN(info, ret, &t1);
2352 void tcg_gen_call2(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2)
2354 TCGTemp *args[2] = { t1, t2 };
2355 tcg_gen_callN(info, ret, args);
2358 void tcg_gen_call3(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
2359 TCGTemp *t2, TCGTemp *t3)
2361 TCGTemp *args[3] = { t1, t2, t3 };
2362 tcg_gen_callN(info, ret, args);
2365 void tcg_gen_call4(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
2366 TCGTemp *t2, TCGTemp *t3, TCGTemp *t4)
2368 TCGTemp *args[4] = { t1, t2, t3, t4 };
2369 tcg_gen_callN(info, ret, args);
2372 void tcg_gen_call5(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
2373 TCGTemp *t2, TCGTemp *t3, TCGTemp *t4, TCGTemp *t5)
2375 TCGTemp *args[5] = { t1, t2, t3, t4, t5 };
2376 tcg_gen_callN(info, ret, args);
2379 void tcg_gen_call6(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2,
2380 TCGTemp *t3, TCGTemp *t4, TCGTemp *t5, TCGTemp *t6)
2382 TCGTemp *args[6] = { t1, t2, t3, t4, t5, t6 };
2383 tcg_gen_callN(info, ret, args);
2386 void tcg_gen_call7(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
2387 TCGTemp *t2, TCGTemp *t3, TCGTemp *t4,
2388 TCGTemp *t5, TCGTemp *t6, TCGTemp *t7)
2390 TCGTemp *args[7] = { t1, t2, t3, t4, t5, t6, t7 };
2391 tcg_gen_callN(info, ret, args);
2394 static void tcg_reg_alloc_start(TCGContext *s)
2396 int i, n;
2398 for (i = 0, n = s->nb_temps; i < n; i++) {
2399 TCGTemp *ts = &s->temps[i];
2400 TCGTempVal val = TEMP_VAL_MEM;
2402 switch (ts->kind) {
2403 case TEMP_CONST:
2404 val = TEMP_VAL_CONST;
2405 break;
2406 case TEMP_FIXED:
2407 val = TEMP_VAL_REG;
2408 break;
2409 case TEMP_GLOBAL:
2410 break;
2411 case TEMP_EBB:
2412 val = TEMP_VAL_DEAD;
2413 /* fall through */
2414 case TEMP_TB:
2415 ts->mem_allocated = 0;
2416 break;
2417 default:
2418 g_assert_not_reached();
2420 ts->val_type = val;
2423 memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
2426 static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
2427 TCGTemp *ts)
2429 int idx = temp_idx(ts);
2431 switch (ts->kind) {
2432 case TEMP_FIXED:
2433 case TEMP_GLOBAL:
2434 pstrcpy(buf, buf_size, ts->name);
2435 break;
2436 case TEMP_TB:
2437 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
2438 break;
2439 case TEMP_EBB:
2440 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
2441 break;
2442 case TEMP_CONST:
2443 switch (ts->type) {
2444 case TCG_TYPE_I32:
2445 snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
2446 break;
2447 #if TCG_TARGET_REG_BITS > 32
2448 case TCG_TYPE_I64:
2449 snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
2450 break;
2451 #endif
2452 case TCG_TYPE_V64:
2453 case TCG_TYPE_V128:
2454 case TCG_TYPE_V256:
2455 snprintf(buf, buf_size, "v%d$0x%" PRIx64,
2456 64 << (ts->type - TCG_TYPE_V64), ts->val);
2457 break;
2458 default:
2459 g_assert_not_reached();
2461 break;
2463 return buf;
2466 static char *tcg_get_arg_str(TCGContext *s, char *buf,
2467 int buf_size, TCGArg arg)
2469 return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
2472 static const char * const cond_name[] =
2474 [TCG_COND_NEVER] = "never",
2475 [TCG_COND_ALWAYS] = "always",
2476 [TCG_COND_EQ] = "eq",
2477 [TCG_COND_NE] = "ne",
2478 [TCG_COND_LT] = "lt",
2479 [TCG_COND_GE] = "ge",
2480 [TCG_COND_LE] = "le",
2481 [TCG_COND_GT] = "gt",
2482 [TCG_COND_LTU] = "ltu",
2483 [TCG_COND_GEU] = "geu",
2484 [TCG_COND_LEU] = "leu",
2485 [TCG_COND_GTU] = "gtu"
2488 static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] =
2490 [MO_UB] = "ub",
2491 [MO_SB] = "sb",
2492 [MO_LEUW] = "leuw",
2493 [MO_LESW] = "lesw",
2494 [MO_LEUL] = "leul",
2495 [MO_LESL] = "lesl",
2496 [MO_LEUQ] = "leq",
2497 [MO_BEUW] = "beuw",
2498 [MO_BESW] = "besw",
2499 [MO_BEUL] = "beul",
2500 [MO_BESL] = "besl",
2501 [MO_BEUQ] = "beq",
2502 [MO_128 + MO_BE] = "beo",
2503 [MO_128 + MO_LE] = "leo",
2506 static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
2507 [MO_UNALN >> MO_ASHIFT] = "un+",
2508 [MO_ALIGN >> MO_ASHIFT] = "al+",
2509 [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
2510 [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
2511 [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
2512 [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
2513 [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
2514 [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
2517 static const char * const atom_name[(MO_ATOM_MASK >> MO_ATOM_SHIFT) + 1] = {
2518 [MO_ATOM_IFALIGN >> MO_ATOM_SHIFT] = "",
2519 [MO_ATOM_IFALIGN_PAIR >> MO_ATOM_SHIFT] = "pair+",
2520 [MO_ATOM_WITHIN16 >> MO_ATOM_SHIFT] = "w16+",
2521 [MO_ATOM_WITHIN16_PAIR >> MO_ATOM_SHIFT] = "w16p+",
2522 [MO_ATOM_SUBALIGN >> MO_ATOM_SHIFT] = "sub+",
2523 [MO_ATOM_NONE >> MO_ATOM_SHIFT] = "noat+",
2526 static const char bswap_flag_name[][6] = {
2527 [TCG_BSWAP_IZ] = "iz",
2528 [TCG_BSWAP_OZ] = "oz",
2529 [TCG_BSWAP_OS] = "os",
2530 [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
2531 [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
2534 static inline bool tcg_regset_single(TCGRegSet d)
2536 return (d & (d - 1)) == 0;
2539 static inline TCGReg tcg_regset_first(TCGRegSet d)
2541 if (TCG_TARGET_NB_REGS <= 32) {
2542 return ctz32(d);
2543 } else {
2544 return ctz64(d);
2548 /* Return only the number of characters output -- no error return. */
2549 #define ne_fprintf(...) \
2550 ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
2552 static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
2554 char buf[128];
2555 TCGOp *op;
2557 QTAILQ_FOREACH(op, &s->ops, link) {
2558 int i, k, nb_oargs, nb_iargs, nb_cargs;
2559 const TCGOpDef *def;
2560 TCGOpcode c;
2561 int col = 0;
2563 c = op->opc;
2564 def = &tcg_op_defs[c];
2566 if (c == INDEX_op_insn_start) {
2567 nb_oargs = 0;
2568 col += ne_fprintf(f, "\n ----");
2570 for (i = 0, k = s->insn_start_words; i < k; ++i) {
2571 col += ne_fprintf(f, " %016" PRIx64,
2572 tcg_get_insn_start_param(op, i));
2574 } else if (c == INDEX_op_call) {
2575 const TCGHelperInfo *info = tcg_call_info(op);
2576 void *func = tcg_call_func(op);
2578 /* variable number of arguments */
2579 nb_oargs = TCGOP_CALLO(op);
2580 nb_iargs = TCGOP_CALLI(op);
2581 nb_cargs = def->nb_cargs;
2583 col += ne_fprintf(f, " %s ", def->name);
2586 * Print the function name from TCGHelperInfo, if available.
2587 * Note that plugins have a template function for the info,
2588 * but the actual function pointer comes from the plugin.
2590 if (func == info->func) {
2591 col += ne_fprintf(f, "%s", info->name);
2592 } else {
2593 col += ne_fprintf(f, "plugin(%p)", func);
2596 col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
2597 for (i = 0; i < nb_oargs; i++) {
2598 col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
2599 op->args[i]));
2601 for (i = 0; i < nb_iargs; i++) {
2602 TCGArg arg = op->args[nb_oargs + i];
2603 const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
2604 col += ne_fprintf(f, ",%s", t);
2606 } else {
2607 col += ne_fprintf(f, " %s ", def->name);
2609 nb_oargs = def->nb_oargs;
2610 nb_iargs = def->nb_iargs;
2611 nb_cargs = def->nb_cargs;
2613 if (def->flags & TCG_OPF_VECTOR) {
2614 col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
2615 8 << TCGOP_VECE(op));
2618 k = 0;
2619 for (i = 0; i < nb_oargs; i++) {
2620 const char *sep = k ? "," : "";
2621 col += ne_fprintf(f, "%s%s", sep,
2622 tcg_get_arg_str(s, buf, sizeof(buf),
2623 op->args[k++]));
2625 for (i = 0; i < nb_iargs; i++) {
2626 const char *sep = k ? "," : "";
2627 col += ne_fprintf(f, "%s%s", sep,
2628 tcg_get_arg_str(s, buf, sizeof(buf),
2629 op->args[k++]));
2631 switch (c) {
2632 case INDEX_op_brcond_i32:
2633 case INDEX_op_setcond_i32:
2634 case INDEX_op_negsetcond_i32:
2635 case INDEX_op_movcond_i32:
2636 case INDEX_op_brcond2_i32:
2637 case INDEX_op_setcond2_i32:
2638 case INDEX_op_brcond_i64:
2639 case INDEX_op_setcond_i64:
2640 case INDEX_op_negsetcond_i64:
2641 case INDEX_op_movcond_i64:
2642 case INDEX_op_cmp_vec:
2643 case INDEX_op_cmpsel_vec:
2644 if (op->args[k] < ARRAY_SIZE(cond_name)
2645 && cond_name[op->args[k]]) {
2646 col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
2647 } else {
2648 col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
2650 i = 1;
2651 break;
2652 case INDEX_op_qemu_ld_a32_i32:
2653 case INDEX_op_qemu_ld_a64_i32:
2654 case INDEX_op_qemu_st_a32_i32:
2655 case INDEX_op_qemu_st_a64_i32:
2656 case INDEX_op_qemu_st8_a32_i32:
2657 case INDEX_op_qemu_st8_a64_i32:
2658 case INDEX_op_qemu_ld_a32_i64:
2659 case INDEX_op_qemu_ld_a64_i64:
2660 case INDEX_op_qemu_st_a32_i64:
2661 case INDEX_op_qemu_st_a64_i64:
2662 case INDEX_op_qemu_ld_a32_i128:
2663 case INDEX_op_qemu_ld_a64_i128:
2664 case INDEX_op_qemu_st_a32_i128:
2665 case INDEX_op_qemu_st_a64_i128:
2667 const char *s_al, *s_op, *s_at;
2668 MemOpIdx oi = op->args[k++];
2669 MemOp mop = get_memop(oi);
2670 unsigned ix = get_mmuidx(oi);
2672 s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
2673 s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
2674 s_at = atom_name[(mop & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
2675 mop &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);
2677 /* If all fields are accounted for, print symbolically. */
2678 if (!mop && s_al && s_op && s_at) {
2679 col += ne_fprintf(f, ",%s%s%s,%u",
2680 s_at, s_al, s_op, ix);
2681 } else {
2682 mop = get_memop(oi);
2683 col += ne_fprintf(f, ",$0x%x,%u", mop, ix);
2685 i = 1;
2687 break;
2688 case INDEX_op_bswap16_i32:
2689 case INDEX_op_bswap16_i64:
2690 case INDEX_op_bswap32_i32:
2691 case INDEX_op_bswap32_i64:
2692 case INDEX_op_bswap64_i64:
2694 TCGArg flags = op->args[k];
2695 const char *name = NULL;
2697 if (flags < ARRAY_SIZE(bswap_flag_name)) {
2698 name = bswap_flag_name[flags];
2700 if (name) {
2701 col += ne_fprintf(f, ",%s", name);
2702 } else {
2703 col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
2705 i = k = 1;
2707 break;
2708 default:
2709 i = 0;
2710 break;
2712 switch (c) {
2713 case INDEX_op_set_label:
2714 case INDEX_op_br:
2715 case INDEX_op_brcond_i32:
2716 case INDEX_op_brcond_i64:
2717 case INDEX_op_brcond2_i32:
2718 col += ne_fprintf(f, "%s$L%d", k ? "," : "",
2719 arg_label(op->args[k])->id);
2720 i++, k++;
2721 break;
2722 case INDEX_op_mb:
2724 TCGBar membar = op->args[k];
2725 const char *b_op, *m_op;
2727 switch (membar & TCG_BAR_SC) {
2728 case 0:
2729 b_op = "none";
2730 break;
2731 case TCG_BAR_LDAQ:
2732 b_op = "acq";
2733 break;
2734 case TCG_BAR_STRL:
2735 b_op = "rel";
2736 break;
2737 case TCG_BAR_SC:
2738 b_op = "seq";
2739 break;
2740 default:
2741 g_assert_not_reached();
2744 switch (membar & TCG_MO_ALL) {
2745 case 0:
2746 m_op = "none";
2747 break;
2748 case TCG_MO_LD_LD:
2749 m_op = "rr";
2750 break;
2751 case TCG_MO_LD_ST:
2752 m_op = "rw";
2753 break;
2754 case TCG_MO_ST_LD:
2755 m_op = "wr";
2756 break;
2757 case TCG_MO_ST_ST:
2758 m_op = "ww";
2759 break;
2760 case TCG_MO_LD_LD | TCG_MO_LD_ST:
2761 m_op = "rr+rw";
2762 break;
2763 case TCG_MO_LD_LD | TCG_MO_ST_LD:
2764 m_op = "rr+wr";
2765 break;
2766 case TCG_MO_LD_LD | TCG_MO_ST_ST:
2767 m_op = "rr+ww";
2768 break;
2769 case TCG_MO_LD_ST | TCG_MO_ST_LD:
2770 m_op = "rw+wr";
2771 break;
2772 case TCG_MO_LD_ST | TCG_MO_ST_ST:
2773 m_op = "rw+ww";
2774 break;
2775 case TCG_MO_ST_LD | TCG_MO_ST_ST:
2776 m_op = "wr+ww";
2777 break;
2778 case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD:
2779 m_op = "rr+rw+wr";
2780 break;
2781 case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST:
2782 m_op = "rr+rw+ww";
2783 break;
2784 case TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_ST_ST:
2785 m_op = "rr+wr+ww";
2786 break;
2787 case TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST:
2788 m_op = "rw+wr+ww";
2789 break;
2790 case TCG_MO_ALL:
2791 m_op = "all";
2792 break;
2793 default:
2794 g_assert_not_reached();
2797 col += ne_fprintf(f, "%s%s:%s", (k ? "," : ""), b_op, m_op);
2798 i++, k++;
2800 break;
2801 default:
2802 break;
2804 for (; i < nb_cargs; i++, k++) {
2805 col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
2806 op->args[k]);
2810 if (have_prefs || op->life) {
2811 for (; col < 40; ++col) {
2812 putc(' ', f);
2816 if (op->life) {
2817 unsigned life = op->life;
2819 if (life & (SYNC_ARG * 3)) {
2820 ne_fprintf(f, " sync:");
2821 for (i = 0; i < 2; ++i) {
2822 if (life & (SYNC_ARG << i)) {
2823 ne_fprintf(f, " %d", i);
2827 life /= DEAD_ARG;
2828 if (life) {
2829 ne_fprintf(f, " dead:");
2830 for (i = 0; life; ++i, life >>= 1) {
2831 if (life & 1) {
2832 ne_fprintf(f, " %d", i);
2838 if (have_prefs) {
2839 for (i = 0; i < nb_oargs; ++i) {
2840 TCGRegSet set = output_pref(op, i);
2842 if (i == 0) {
2843 ne_fprintf(f, " pref=");
2844 } else {
2845 ne_fprintf(f, ",");
2847 if (set == 0) {
2848 ne_fprintf(f, "none");
2849 } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
2850 ne_fprintf(f, "all");
2851 #ifdef CONFIG_DEBUG_TCG
2852 } else if (tcg_regset_single(set)) {
2853 TCGReg reg = tcg_regset_first(set);
2854 ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
2855 #endif
2856 } else if (TCG_TARGET_NB_REGS <= 32) {
2857 ne_fprintf(f, "0x%x", (uint32_t)set);
2858 } else {
2859 ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
2864 putc('\n', f);
2868 /* we give more priority to constraints with fewer registers */
2869 static int get_constraint_priority(const TCGOpDef *def, int k)
2871 const TCGArgConstraint *arg_ct = &def->args_ct[k];
2872 int n = ctpop64(arg_ct->regs);
2875 * Sort constraints of a single register first, which includes output
2876 * aliases (which must exactly match the input already allocated).
2878 if (n == 1 || arg_ct->oalias) {
2879 return INT_MAX;
2883 * Sort register pairs next, first then second immediately after.
2884 * Arbitrarily sort multiple pairs by the index of the first reg;
2885 * there shouldn't be many pairs.
2887 switch (arg_ct->pair) {
2888 case 1:
2889 case 3:
2890 return (k + 1) * 2;
2891 case 2:
2892 return (arg_ct->pair_index + 1) * 2 - 1;
2895 /* Finally, sort by decreasing register count. */
2896 assert(n > 1);
2897 return -n;
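/*
 * Worked example (assumed operand set, not from tcg.c): for an op with
 *   o0 = single fixed register          -> INT_MAX
 *   i1 = second of a pair, pair_index 0 -> (0 + 1) * 2 - 1 = 1
 *   i2 = a 16-register class            -> -16
 * sort_constraints() below therefore orders them o0, then i1, then i2.
 */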
2900 /* sort from highest priority to lowest */
2901 static void sort_constraints(TCGOpDef *def, int start, int n)
2903 int i, j;
2904 TCGArgConstraint *a = def->args_ct;
2906 for (i = 0; i < n; i++) {
2907 a[start + i].sort_index = start + i;
2909 if (n <= 1) {
2910 return;
2912 for (i = 0; i < n - 1; i++) {
2913 for (j = i + 1; j < n; j++) {
2914 int p1 = get_constraint_priority(def, a[start + i].sort_index);
2915 int p2 = get_constraint_priority(def, a[start + j].sort_index);
2916 if (p1 < p2) {
2917 int tmp = a[start + i].sort_index;
2918 a[start + i].sort_index = a[start + j].sort_index;
2919 a[start + j].sort_index = tmp;
2925 static void process_op_defs(TCGContext *s)
2927 TCGOpcode op;
2929 for (op = 0; op < NB_OPS; op++) {
2930 TCGOpDef *def = &tcg_op_defs[op];
2931 const TCGTargetOpDef *tdefs;
2932 bool saw_alias_pair = false;
2933 int i, o, i2, o2, nb_args;
2935 if (def->flags & TCG_OPF_NOT_PRESENT) {
2936 continue;
2939 nb_args = def->nb_iargs + def->nb_oargs;
2940 if (nb_args == 0) {
2941 continue;
2945 * Macro magic should make it impossible, but double-check that
2946 * the array index is in range. Since the signedness of an enum
2947 * is implementation-defined, force the result to unsigned.
2949 unsigned con_set = tcg_target_op_def(op);
2950 tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
2951 tdefs = &constraint_sets[con_set];
2953 for (i = 0; i < nb_args; i++) {
2954 const char *ct_str = tdefs->args_ct_str[i];
2955 bool input_p = i >= def->nb_oargs;
2957 /* Incomplete TCGTargetOpDef entry. */
2958 tcg_debug_assert(ct_str != NULL);
2960 switch (*ct_str) {
2961 case '0' ... '9':
2962 o = *ct_str - '0';
2963 tcg_debug_assert(input_p);
2964 tcg_debug_assert(o < def->nb_oargs);
2965 tcg_debug_assert(def->args_ct[o].regs != 0);
2966 tcg_debug_assert(!def->args_ct[o].oalias);
2967 def->args_ct[i] = def->args_ct[o];
2968 /* The output sets oalias. */
2969 def->args_ct[o].oalias = 1;
2970 def->args_ct[o].alias_index = i;
2971 /* The input sets ialias. */
2972 def->args_ct[i].ialias = 1;
2973 def->args_ct[i].alias_index = o;
2974 if (def->args_ct[i].pair) {
2975 saw_alias_pair = true;
2977 tcg_debug_assert(ct_str[1] == '\0');
2978 continue;
2980 case '&':
2981 tcg_debug_assert(!input_p);
2982 def->args_ct[i].newreg = true;
2983 ct_str++;
2984 break;
2986 case 'p': /* plus */
2987 /* Allocate to the register after the previous. */
2988 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2989 o = i - 1;
2990 tcg_debug_assert(!def->args_ct[o].pair);
2991 tcg_debug_assert(!def->args_ct[o].ct);
2992 def->args_ct[i] = (TCGArgConstraint){
2993 .pair = 2,
2994 .pair_index = o,
2995 .regs = def->args_ct[o].regs << 1,
2996 .newreg = def->args_ct[o].newreg,
2998 def->args_ct[o].pair = 1;
2999 def->args_ct[o].pair_index = i;
3000 tcg_debug_assert(ct_str[1] == '\0');
3001 continue;
3003 case 'm': /* minus */
3004 /* Allocate to the register before the previous. */
3005 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
3006 o = i - 1;
3007 tcg_debug_assert(!def->args_ct[o].pair);
3008 tcg_debug_assert(!def->args_ct[o].ct);
3009 def->args_ct[i] = (TCGArgConstraint){
3010 .pair = 1,
3011 .pair_index = o,
3012 .regs = def->args_ct[o].regs >> 1,
3013 .newreg = def->args_ct[o].newreg,
3015 def->args_ct[o].pair = 2;
3016 def->args_ct[o].pair_index = i;
3017 tcg_debug_assert(ct_str[1] == '\0');
3018 continue;
3021 do {
3022 switch (*ct_str) {
3023 case 'i':
3024 def->args_ct[i].ct |= TCG_CT_CONST;
3025 break;
3027 /* Include all of the target-specific constraints. */
3029 #undef CONST
3030 #define CONST(CASE, MASK) \
3031 case CASE: def->args_ct[i].ct |= MASK; break;
3032 #define REGS(CASE, MASK) \
3033 case CASE: def->args_ct[i].regs |= MASK; break;
3035 #include "tcg-target-con-str.h"
3037 #undef REGS
3038 #undef CONST
3039 default:
3040 case '0' ... '9':
3041 case '&':
3042 case 'p':
3043 case 'm':
3044 /* Typo in TCGTargetOpDef constraint. */
3045 g_assert_not_reached();
3047 } while (*++ct_str != '\0');
3050 /* TCGTargetOpDef entry with too much information? */
3051 tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
3054 * Fix up output pairs that are aliased with inputs.
3055 * When we created the alias, we copied pair from the output.
3056 * There are three cases:
3057 * (1a) Pairs of inputs alias pairs of outputs.
3058 * (1b) One input aliases the first of a pair of outputs.
3059 * (2) One input aliases the second of a pair of outputs.
3061 * Case 1a is handled by making sure that the pair_index'es are
3062 * properly updated so that they appear the same as a pair of inputs.
3064 * Case 1b is handled by setting the pair_index of the input to
3065 * itself, simply so it doesn't point to an unrelated argument.
3066 * Since we don't encounter the "second" during the input allocation
3067 * phase, nothing happens with the second half of the input pair.
3069 * Case 2 is handled by setting the second input to pair=3, the
3070 * first output to pair=3, and the pair_index'es to match.
3072 if (saw_alias_pair) {
3073 for (i = def->nb_oargs; i < nb_args; i++) {
3075 * Since [0-9pm] must be alone in the constraint string,
3076 * the only way they can both be set is if the pair comes
3077 * from the output alias.
3079 if (!def->args_ct[i].ialias) {
3080 continue;
3082 switch (def->args_ct[i].pair) {
3083 case 0:
3084 break;
3085 case 1:
3086 o = def->args_ct[i].alias_index;
3087 o2 = def->args_ct[o].pair_index;
3088 tcg_debug_assert(def->args_ct[o].pair == 1);
3089 tcg_debug_assert(def->args_ct[o2].pair == 2);
3090 if (def->args_ct[o2].oalias) {
3091 /* Case 1a */
3092 i2 = def->args_ct[o2].alias_index;
3093 tcg_debug_assert(def->args_ct[i2].pair == 2);
3094 def->args_ct[i2].pair_index = i;
3095 def->args_ct[i].pair_index = i2;
3096 } else {
3097 /* Case 1b */
3098 def->args_ct[i].pair_index = i;
3100 break;
3101 case 2:
3102 o = def->args_ct[i].alias_index;
3103 o2 = def->args_ct[o].pair_index;
3104 tcg_debug_assert(def->args_ct[o].pair == 2);
3105 tcg_debug_assert(def->args_ct[o2].pair == 1);
3106 if (def->args_ct[o2].oalias) {
3107 /* Case 1a */
3108 i2 = def->args_ct[o2].alias_index;
3109 tcg_debug_assert(def->args_ct[i2].pair == 1);
3110 def->args_ct[i2].pair_index = i;
3111 def->args_ct[i].pair_index = i2;
3112 } else {
3113 /* Case 2 */
3114 def->args_ct[i].pair = 3;
3115 def->args_ct[o2].pair = 3;
3116 def->args_ct[i].pair_index = o2;
3117 def->args_ct[o2].pair_index = i;
3119 break;
3120 default:
3121 g_assert_not_reached();
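/*
 * Concrete illustration of case 2 above (hypothetical constraint set):
 * outputs o0,o1 form a pair and an input is constrained as "1", so it
 * aliases o1, the second of the pair.  The loop above then sets both
 * the input and o0 to pair == 3 with pair_index cross-linked, letting
 * the register allocator recover the pairing from either end.
 */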
3126 /* sort the constraints (XXX: this is just a heuristic) */
3127 sort_constraints(def, 0, def->nb_oargs);
3128 sort_constraints(def, def->nb_oargs, def->nb_iargs);
3132 static void remove_label_use(TCGOp *op, int idx)
3134 TCGLabel *label = arg_label(op->args[idx]);
3135 TCGLabelUse *use;
3137 QSIMPLEQ_FOREACH(use, &label->branches, next) {
3138 if (use->op == op) {
3139 QSIMPLEQ_REMOVE(&label->branches, use, TCGLabelUse, next);
3140 return;
3143 g_assert_not_reached();
3146 void tcg_op_remove(TCGContext *s, TCGOp *op)
3148 switch (op->opc) {
3149 case INDEX_op_br:
3150 remove_label_use(op, 0);
3151 break;
3152 case INDEX_op_brcond_i32:
3153 case INDEX_op_brcond_i64:
3154 remove_label_use(op, 3);
3155 break;
3156 case INDEX_op_brcond2_i32:
3157 remove_label_use(op, 5);
3158 break;
3159 default:
3160 break;
3163 QTAILQ_REMOVE(&s->ops, op, link);
3164 QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
3165 s->nb_ops--;
3168 void tcg_remove_ops_after(TCGOp *op)
3170 TCGContext *s = tcg_ctx;
3172 while (true) {
3173 TCGOp *last = tcg_last_op();
3174 if (last == op) {
3175 return;
3177 tcg_op_remove(s, last);
3181 static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
3183 TCGContext *s = tcg_ctx;
3184 TCGOp *op = NULL;
3186 if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
3187 QTAILQ_FOREACH(op, &s->free_ops, link) {
3188 if (nargs <= op->nargs) {
3189 QTAILQ_REMOVE(&s->free_ops, op, link);
3190 nargs = op->nargs;
3191 goto found;
3196 /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
3197 nargs = MAX(4, nargs);
3198 op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);
3200 found:
3201 memset(op, 0, offsetof(TCGOp, link));
3202 op->opc = opc;
3203 op->nargs = nargs;
3205 /* Check for bitfield overflow. */
3206 tcg_debug_assert(op->nargs == nargs);
3208 s->nb_ops++;
3209 return op;
3212 TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
3214 TCGOp *op = tcg_op_alloc(opc, nargs);
3215 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
3216 return op;
3219 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
3220 TCGOpcode opc, unsigned nargs)
3222 TCGOp *new_op = tcg_op_alloc(opc, nargs);
3223 QTAILQ_INSERT_BEFORE(old_op, new_op, link);
3224 return new_op;
3227 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
3228 TCGOpcode opc, unsigned nargs)
3230 TCGOp *new_op = tcg_op_alloc(opc, nargs);
3231 QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
3232 return new_op;
3235 static void move_label_uses(TCGLabel *to, TCGLabel *from)
3237 TCGLabelUse *u;
3239 QSIMPLEQ_FOREACH(u, &from->branches, next) {
3240 TCGOp *op = u->op;
3241 switch (op->opc) {
3242 case INDEX_op_br:
3243 op->args[0] = label_arg(to);
3244 break;
3245 case INDEX_op_brcond_i32:
3246 case INDEX_op_brcond_i64:
3247 op->args[3] = label_arg(to);
3248 break;
3249 case INDEX_op_brcond2_i32:
3250 op->args[5] = label_arg(to);
3251 break;
3252 default:
3253 g_assert_not_reached();
3257 QSIMPLEQ_CONCAT(&to->branches, &from->branches);
3260 /* Reachability analysis: remove unreachable code. */
3261 static void __attribute__((noinline))
3262 reachable_code_pass(TCGContext *s)
3264 TCGOp *op, *op_next, *op_prev;
3265 bool dead = false;
3267 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
3268 bool remove = dead;
3269 TCGLabel *label;
3271 switch (op->opc) {
3272 case INDEX_op_set_label:
3273 label = arg_label(op->args[0]);
3276 * Note that the first op in the TB is always a load,
3277 * so there is always something before a label.
3279 op_prev = QTAILQ_PREV(op, link);
3282 * If we find two sequential labels, move all branches to
3283 * reference the second label and remove the first label.
3284 * Do this before branch to next optimization, so that the
3285 * middle label is out of the way.
3287 if (op_prev->opc == INDEX_op_set_label) {
3288 move_label_uses(label, arg_label(op_prev->args[0]));
3289 tcg_op_remove(s, op_prev);
3290 op_prev = QTAILQ_PREV(op, link);
3294 * Optimization can fold conditional branches to unconditional.
3295 * If we find a label which is preceded by an unconditional
3296 * branch to next, remove the branch. We couldn't do this when
3297 * processing the branch because any dead code between the branch
3298 * and label had not yet been removed.
3300 if (op_prev->opc == INDEX_op_br &&
3301 label == arg_label(op_prev->args[0])) {
3302 tcg_op_remove(s, op_prev);
3303 /* Fall through means insns become live again. */
3304 dead = false;
3307 if (QSIMPLEQ_EMPTY(&label->branches)) {
3309 * While there is an occasional backward branch, virtually
3310 * all branches generated by the translators are forward,
3311 * which means that generally we will have already removed
3312 * all references to the label that there will be, and there is
3313 * little to be gained by iterating.
3315 remove = true;
3316 } else {
3317 /* Once we see a label, insns become live again. */
3318 dead = false;
3319 remove = false;
3321 break;
3323 case INDEX_op_br:
3324 case INDEX_op_exit_tb:
3325 case INDEX_op_goto_ptr:
3326 /* Unconditional branches; everything following is dead. */
3327 dead = true;
3328 break;
3330 case INDEX_op_call:
3331 /* Notice noreturn helper calls, raising exceptions. */
3332 if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
3333 dead = true;
3335 break;
3337 case INDEX_op_insn_start:
3338 /* Never remove -- we need to keep these for unwind. */
3339 remove = false;
3340 break;
3342 default:
3343 break;
3346 if (remove) {
3347 tcg_op_remove(s, op);
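/*
 * Minimal standalone model of the walk above (not from tcg.c): once an
 * unconditional branch is seen, subsequent ops are removed until the
 * next label makes instructions live again.
 */
#include <stdio.h>

int main(void)
{
    enum { OP, BR, LABEL } ops[] = { OP, BR, OP, OP, LABEL, OP };
    int dead = 0;

    for (unsigned i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
        if (ops[i] == LABEL) {
            dead = 0;                     /* insns become live again */
        } else if (dead) {
            printf("remove op %u\n", i);  /* would call tcg_op_remove */
            continue;
        }
        if (ops[i] == BR) {
            dead = 1;                     /* everything following dies */
        }
    }
    return 0;
}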
3352 #define TS_DEAD 1
3353 #define TS_MEM 2
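/*
 * TS_DEAD marks a temp whose value is not used by any later op (the
 * liveness passes walk the op list backwards); TS_MEM marks a temp
 * whose canonical memory slot is, or must be made, up to date.
 */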
3355 #define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
3356 #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
3358 /* For liveness_pass_1, the register preferences for a given temp. */
3359 static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
3361 return ts->state_ptr;
3364 /* For liveness_pass_1, reset the preferences for a given temp to the
3365 * maximal regset for its type.
3367 static inline void la_reset_pref(TCGTemp *ts)
3369 *la_temp_pref(ts)
3370 = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
3373 /* liveness analysis: end of function: all temps are dead, and globals
3374 should be in memory. */
3375 static void la_func_end(TCGContext *s, int ng, int nt)
3377 int i;
3379 for (i = 0; i < ng; ++i) {
3380 s->temps[i].state = TS_DEAD | TS_MEM;
3381 la_reset_pref(&s->temps[i]);
3383 for (i = ng; i < nt; ++i) {
3384 s->temps[i].state = TS_DEAD;
3385 la_reset_pref(&s->temps[i]);
3389 /* liveness analysis: end of basic block: all temps are dead, globals
3390 and local temps should be in memory. */
3391 static void la_bb_end(TCGContext *s, int ng, int nt)
3393 int i;
3395 for (i = 0; i < nt; ++i) {
3396 TCGTemp *ts = &s->temps[i];
3397 int state;
3399 switch (ts->kind) {
3400 case TEMP_FIXED:
3401 case TEMP_GLOBAL:
3402 case TEMP_TB:
3403 state = TS_DEAD | TS_MEM;
3404 break;
3405 case TEMP_EBB:
3406 case TEMP_CONST:
3407 state = TS_DEAD;
3408 break;
3409 default:
3410 g_assert_not_reached();
3412 ts->state = state;
3413 la_reset_pref(ts);
3417 /* liveness analysis: sync globals back to memory. */
3418 static void la_global_sync(TCGContext *s, int ng)
3420 int i;
3422 for (i = 0; i < ng; ++i) {
3423 int state = s->temps[i].state;
3424 s->temps[i].state = state | TS_MEM;
3425 if (state == TS_DEAD) {
3426 /* If the global was previously dead, reset prefs. */
3427 la_reset_pref(&s->temps[i]);
3433 * liveness analysis: conditional branch: all temps are dead unless
3434 * explicitly live-across-conditional-branch, globals and local temps
3435 * should be synced.
3437 static void la_bb_sync(TCGContext *s, int ng, int nt)
3439 la_global_sync(s, ng);
3441 for (int i = ng; i < nt; ++i) {
3442 TCGTemp *ts = &s->temps[i];
3443 int state;
3445 switch (ts->kind) {
3446 case TEMP_TB:
3447 state = ts->state;
3448 ts->state = state | TS_MEM;
3449 if (state != TS_DEAD) {
3450 continue;
3452 break;
3453 case TEMP_EBB:
3454 case TEMP_CONST:
3455 continue;
3456 default:
3457 g_assert_not_reached();
3459 la_reset_pref(&s->temps[i]);
3463 /* liveness analysis: sync globals back to memory and kill. */
3464 static void la_global_kill(TCGContext *s, int ng)
3466 int i;
3468 for (i = 0; i < ng; i++) {
3469 s->temps[i].state = TS_DEAD | TS_MEM;
3470 la_reset_pref(&s->temps[i]);
3474 /* liveness analysis: note live globals crossing calls. */
3475 static void la_cross_call(TCGContext *s, int nt)
3477 TCGRegSet mask = ~tcg_target_call_clobber_regs;
3478 int i;
3480 for (i = 0; i < nt; i++) {
3481 TCGTemp *ts = &s->temps[i];
3482 if (!(ts->state & TS_DEAD)) {
3483 TCGRegSet *pset = la_temp_pref(ts);
3484 TCGRegSet set = *pset;
3486 set &= mask;
3487 /* If the combination is not possible, restart. */
3488 if (set == 0) {
3489 set = tcg_target_available_regs[ts->type] & mask;
3491 *pset = set;
3497 * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
3498 * to TEMP_EBB, if possible.
3500 static void __attribute__((noinline))
3501 liveness_pass_0(TCGContext *s)
3503 void * const multiple_ebb = (void *)(uintptr_t)-1;
3504 int nb_temps = s->nb_temps;
3505 TCGOp *op, *ebb;
3507 for (int i = s->nb_globals; i < nb_temps; ++i) {
3508 s->temps[i].state_ptr = NULL;
3512 * Represent each EBB by the op at which it begins. In the case of
3513 * the first EBB, this is the first op, otherwise it is a label.
3514 * Collect the uses of each TEMP_TB: NULL for unused, EBB for use
3515 * within a single EBB, else MULTIPLE_EBB.
3517 ebb = QTAILQ_FIRST(&s->ops);
3518 QTAILQ_FOREACH(op, &s->ops, link) {
3519 const TCGOpDef *def;
3520 int nb_oargs, nb_iargs;
3522 switch (op->opc) {
3523 case INDEX_op_set_label:
3524 ebb = op;
3525 continue;
3526 case INDEX_op_discard:
3527 continue;
3528 case INDEX_op_call:
3529 nb_oargs = TCGOP_CALLO(op);
3530 nb_iargs = TCGOP_CALLI(op);
3531 break;
3532 default:
3533 def = &tcg_op_defs[op->opc];
3534 nb_oargs = def->nb_oargs;
3535 nb_iargs = def->nb_iargs;
3536 break;
3539 for (int i = 0; i < nb_oargs + nb_iargs; ++i) {
3540 TCGTemp *ts = arg_temp(op->args[i]);
3542 if (ts->kind != TEMP_TB) {
3543 continue;
3545 if (ts->state_ptr == NULL) {
3546 ts->state_ptr = ebb;
3547 } else if (ts->state_ptr != ebb) {
3548 ts->state_ptr = multiple_ebb;
3554 * For TEMP_TB that turned out not to be used beyond one EBB,
3555 * reduce the liveness to TEMP_EBB.
3557 for (int i = s->nb_globals; i < nb_temps; ++i) {
3558 TCGTemp *ts = &s->temps[i];
3559 if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) {
3560 ts->kind = TEMP_EBB;
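/*
 * Standalone model (not from tcg.c) of the use-tracking lattice above:
 * state_ptr moves NULL -> first-use EBB -> MULTIPLE_EBB, and only a
 * temp that stayed within one EBB is demoted to TEMP_EBB.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    void *const multiple_ebb = (void *)(uintptr_t)-1;
    void *ebb1 = &ebb1, *ebb2 = &ebb2;   /* two distinct EBB markers */
    void *state = NULL;
    void *uses[] = { ebb1, ebb1, ebb2 }; /* ops touching the temp */

    for (unsigned i = 0; i < sizeof(uses) / sizeof(uses[0]); i++) {
        if (state == NULL) {
            state = uses[i];             /* first use: remember the EBB */
        } else if (state != uses[i]) {
            state = multiple_ebb;        /* used across EBBs */
        }
    }
    printf("demote to TEMP_EBB? %s\n",
           state != multiple_ebb ? "yes" : "no");
    return 0;
}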
3565 /* Liveness analysis: update the opc_arg_life array to tell if a
3566 given input argument is dead. Instructions updating dead
3567 temporaries are removed. */
3568 static void __attribute__((noinline))
3569 liveness_pass_1(TCGContext *s)
3571 int nb_globals = s->nb_globals;
3572 int nb_temps = s->nb_temps;
3573 TCGOp *op, *op_prev;
3574 TCGRegSet *prefs;
3575 int i;
3577 prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
3578 for (i = 0; i < nb_temps; ++i) {
3579 s->temps[i].state_ptr = prefs + i;
3582 /* ??? Should be redundant with the exit_tb that ends the TB. */
3583 la_func_end(s, nb_globals, nb_temps);
3585 QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
3586 int nb_iargs, nb_oargs;
3587 TCGOpcode opc_new, opc_new2;
3588 bool have_opc_new2;
3589 TCGLifeData arg_life = 0;
3590 TCGTemp *ts;
3591 TCGOpcode opc = op->opc;
3592 const TCGOpDef *def = &tcg_op_defs[opc];
3594 switch (opc) {
3595 case INDEX_op_call:
3597 const TCGHelperInfo *info = tcg_call_info(op);
3598 int call_flags = tcg_call_flags(op);
3600 nb_oargs = TCGOP_CALLO(op);
3601 nb_iargs = TCGOP_CALLI(op);
3603 /* pure functions can be removed if their result is unused */
3604 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
3605 for (i = 0; i < nb_oargs; i++) {
3606 ts = arg_temp(op->args[i]);
3607 if (ts->state != TS_DEAD) {
3608 goto do_not_remove_call;
3611 goto do_remove;
3613 do_not_remove_call:
3615 /* Output args are dead. */
3616 for (i = 0; i < nb_oargs; i++) {
3617 ts = arg_temp(op->args[i]);
3618 if (ts->state & TS_DEAD) {
3619 arg_life |= DEAD_ARG << i;
3621 if (ts->state & TS_MEM) {
3622 arg_life |= SYNC_ARG << i;
3624 ts->state = TS_DEAD;
3625 la_reset_pref(ts);
3628 /* Not used -- it will be tcg_target_call_oarg_reg(). */
3629 memset(op->output_pref, 0, sizeof(op->output_pref));
3631 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
3632 TCG_CALL_NO_READ_GLOBALS))) {
3633 la_global_kill(s, nb_globals);
3634 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
3635 la_global_sync(s, nb_globals);
3638 /* Record arguments that die in this helper. */
3639 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3640 ts = arg_temp(op->args[i]);
3641 if (ts->state & TS_DEAD) {
3642 arg_life |= DEAD_ARG << i;
3646 /* For all live registers, remove call-clobbered prefs. */
3647 la_cross_call(s, nb_temps);
3650 * Input arguments are live for preceding opcodes.
3652 * For those arguments that die, and will be allocated in
3653 * registers, clear the register set for that arg, to be
3654 * filled in below. For args that will be on the stack,
3655 * reset to any available reg. Process arguments in reverse
3656 * order so that if a temp is used more than once, the stack
3657 * reset to max happens before the register reset to 0.
3659 for (i = nb_iargs - 1; i >= 0; i--) {
3660 const TCGCallArgumentLoc *loc = &info->in[i];
3661 ts = arg_temp(op->args[nb_oargs + i]);
3663 if (ts->state & TS_DEAD) {
3664 switch (loc->kind) {
3665 case TCG_CALL_ARG_NORMAL:
3666 case TCG_CALL_ARG_EXTEND_U:
3667 case TCG_CALL_ARG_EXTEND_S:
3668 if (arg_slot_reg_p(loc->arg_slot)) {
3669 *la_temp_pref(ts) = 0;
3670 break;
3672 /* fall through */
3673 default:
3674 *la_temp_pref(ts) =
3675 tcg_target_available_regs[ts->type];
3676 break;
3678 ts->state &= ~TS_DEAD;
3683 * For each input argument, add its input register to prefs.
3684 * If a temp is used once, this produces a single set bit;
3685 * if a temp is used multiple times, this produces a set.
3687 for (i = 0; i < nb_iargs; i++) {
3688 const TCGCallArgumentLoc *loc = &info->in[i];
3689 ts = arg_temp(op->args[nb_oargs + i]);
3691 switch (loc->kind) {
3692 case TCG_CALL_ARG_NORMAL:
3693 case TCG_CALL_ARG_EXTEND_U:
3694 case TCG_CALL_ARG_EXTEND_S:
3695 if (arg_slot_reg_p(loc->arg_slot)) {
3696 tcg_regset_set_reg(*la_temp_pref(ts),
3697 tcg_target_call_iarg_regs[loc->arg_slot]);
3699 break;
3700 default:
3701 break;
3705 break;
3706 case INDEX_op_insn_start:
3707 break;
3708 case INDEX_op_discard:
3709 /* mark the temporary as dead */
3710 ts = arg_temp(op->args[0]);
3711 ts->state = TS_DEAD;
3712 la_reset_pref(ts);
3713 break;
3715 case INDEX_op_add2_i32:
3716 opc_new = INDEX_op_add_i32;
3717 goto do_addsub2;
3718 case INDEX_op_sub2_i32:
3719 opc_new = INDEX_op_sub_i32;
3720 goto do_addsub2;
3721 case INDEX_op_add2_i64:
3722 opc_new = INDEX_op_add_i64;
3723 goto do_addsub2;
3724 case INDEX_op_sub2_i64:
3725 opc_new = INDEX_op_sub_i64;
3726 do_addsub2:
3727 nb_iargs = 4;
3728 nb_oargs = 2;
3729 /* Test if the high part of the operation is dead, but not
3730 the low part. The result can be optimized to a simple
3731 add or sub. This happens often for an x86_64 guest when the
3732 CPU mode is set to 32-bit. */
3733 if (arg_temp(op->args[1])->state == TS_DEAD) {
3734 if (arg_temp(op->args[0])->state == TS_DEAD) {
3735 goto do_remove;
3737 /* Replace the opcode and adjust the args in place,
3738 leaving 3 unused args at the end. */
3739 op->opc = opc = opc_new;
3740 op->args[1] = op->args[2];
3741 op->args[2] = op->args[4];
3742 /* Fall through and mark the single-word operation live. */
3743 nb_iargs = 2;
3744 nb_oargs = 1;
3746 goto do_not_remove;
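/*
 * Example of the rewrite above: an op "add2_i32 lo, hi, al, ah, bl, bh"
 * whose hi output is dead becomes "add_i32 lo, al, bl", dropping the
 * carry computation entirely.
 */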
3748 case INDEX_op_mulu2_i32:
3749 opc_new = INDEX_op_mul_i32;
3750 opc_new2 = INDEX_op_muluh_i32;
3751 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
3752 goto do_mul2;
3753 case INDEX_op_muls2_i32:
3754 opc_new = INDEX_op_mul_i32;
3755 opc_new2 = INDEX_op_mulsh_i32;
3756 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
3757 goto do_mul2;
3758 case INDEX_op_mulu2_i64:
3759 opc_new = INDEX_op_mul_i64;
3760 opc_new2 = INDEX_op_muluh_i64;
3761 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
3762 goto do_mul2;
3763 case INDEX_op_muls2_i64:
3764 opc_new = INDEX_op_mul_i64;
3765 opc_new2 = INDEX_op_mulsh_i64;
3766 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
3767 goto do_mul2;
3768 do_mul2:
3769 nb_iargs = 2;
3770 nb_oargs = 2;
3771 if (arg_temp(op->args[1])->state == TS_DEAD) {
3772 if (arg_temp(op->args[0])->state == TS_DEAD) {
3773 /* Both parts of the operation are dead. */
3774 goto do_remove;
3776 /* The high part of the operation is dead; generate the low. */
3777 op->opc = opc = opc_new;
3778 op->args[1] = op->args[2];
3779 op->args[2] = op->args[3];
3780 } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
3781 /* The low part of the operation is dead; generate the high. */
3782 op->opc = opc = opc_new2;
3783 op->args[0] = op->args[1];
3784 op->args[1] = op->args[2];
3785 op->args[2] = op->args[3];
3786 } else {
3787 goto do_not_remove;
3789 /* Mark the single-word operation live. */
3790 nb_oargs = 1;
3791 goto do_not_remove;
3793 default:
3794 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
3795 nb_iargs = def->nb_iargs;
3796 nb_oargs = def->nb_oargs;
3798 /* Test if the operation can be removed because all
3799 its outputs are dead. We assume that nb_oargs == 0
3800 implies side effects. */
3801 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
3802 for (i = 0; i < nb_oargs; i++) {
3803 if (arg_temp(op->args[i])->state != TS_DEAD) {
3804 goto do_not_remove;
3807 goto do_remove;
3809 goto do_not_remove;
3811 do_remove:
3812 tcg_op_remove(s, op);
3813 break;
3815 do_not_remove:
3816 for (i = 0; i < nb_oargs; i++) {
3817 ts = arg_temp(op->args[i]);
3819 /* Remember the preference of the uses that followed. */
3820 if (i < ARRAY_SIZE(op->output_pref)) {
3821 op->output_pref[i] = *la_temp_pref(ts);
3824 /* Output args are dead. */
3825 if (ts->state & TS_DEAD) {
3826 arg_life |= DEAD_ARG << i;
3828 if (ts->state & TS_MEM) {
3829 arg_life |= SYNC_ARG << i;
3831 ts->state = TS_DEAD;
3832 la_reset_pref(ts);
3835 /* If end of basic block, update. */
3836 if (def->flags & TCG_OPF_BB_EXIT) {
3837 la_func_end(s, nb_globals, nb_temps);
3838 } else if (def->flags & TCG_OPF_COND_BRANCH) {
3839 la_bb_sync(s, nb_globals, nb_temps);
3840 } else if (def->flags & TCG_OPF_BB_END) {
3841 la_bb_end(s, nb_globals, nb_temps);
3842 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3843 la_global_sync(s, nb_globals);
3844 if (def->flags & TCG_OPF_CALL_CLOBBER) {
3845 la_cross_call(s, nb_temps);
3849 /* Record arguments that die in this opcode. */
3850 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3851 ts = arg_temp(op->args[i]);
3852 if (ts->state & TS_DEAD) {
3853 arg_life |= DEAD_ARG << i;
3857 /* Input arguments are live for preceding opcodes. */
3858 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3859 ts = arg_temp(op->args[i]);
3860 if (ts->state & TS_DEAD) {
3861 /* For operands that were dead, initially allow
3862 all regs for the type. */
3863 *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
3864 ts->state &= ~TS_DEAD;
3868 /* Incorporate constraints for this operand. */
3869 switch (opc) {
3870 case INDEX_op_mov_i32:
3871 case INDEX_op_mov_i64:
3872 /* Note that these are TCG_OPF_NOT_PRESENT and do not
3873 have proper constraints. That said, special case
3874 moves to propagate preferences backward. */
3875 if (IS_DEAD_ARG(1)) {
3876 *la_temp_pref(arg_temp(op->args[0]))
3877 = *la_temp_pref(arg_temp(op->args[1]));
3879 break;
3881 default:
3882 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3883 const TCGArgConstraint *ct = &def->args_ct[i];
3884 TCGRegSet set, *pset;
3886 ts = arg_temp(op->args[i]);
3887 pset = la_temp_pref(ts);
3888 set = *pset;
3890 set &= ct->regs;
3891 if (ct->ialias) {
3892 set &= output_pref(op, ct->alias_index);
3894 /* If the combination is not possible, restart. */
3895 if (set == 0) {
3896 set = ct->regs;
3898 *pset = set;
3900 break;
3902 break;
3904 op->life = arg_life;
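/*
 * Standalone sketch of decoding the op->life mask set above, mirroring
 * the dump code earlier in this file.  SYNC_ARG == 1 and DEAD_ARG == 4
 * are assumed to match tcg's definitions: sync flags occupy the two
 * low bits, dead flags start at bit 2.
 */
#include <stdio.h>

int main(void)
{
    const unsigned SYNC = 1, DEAD = 4;          /* assumed values */
    unsigned life = (SYNC << 0) | (DEAD << 1);  /* sync:0  dead:1 */
    unsigned d;

    for (int i = 0; i < 2; i++) {
        if (life & (SYNC << i)) {
            printf("sync: %d\n", i);
        }
    }
    for (d = life / DEAD, life = 0; d; life++, d >>= 1) {
        if (d & 1) {
            printf("dead: %u\n", life);
        }
    }
    return 0;
}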
3908 /* Liveness analysis: Convert indirect regs to direct temporaries. */
3909 static bool __attribute__((noinline))
3910 liveness_pass_2(TCGContext *s)
3912 int nb_globals = s->nb_globals;
3913 int nb_temps, i;
3914 bool changes = false;
3915 TCGOp *op, *op_next;
3917 /* Create a temporary for each indirect global. */
3918 for (i = 0; i < nb_globals; ++i) {
3919 TCGTemp *its = &s->temps[i];
3920 if (its->indirect_reg) {
3921 TCGTemp *dts = tcg_temp_alloc(s);
3922 dts->type = its->type;
3923 dts->base_type = its->base_type;
3924 dts->temp_subindex = its->temp_subindex;
3925 dts->kind = TEMP_EBB;
3926 its->state_ptr = dts;
3927 } else {
3928 its->state_ptr = NULL;
3930 /* All globals begin dead. */
3931 its->state = TS_DEAD;
3933 for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
3934 TCGTemp *its = &s->temps[i];
3935 its->state_ptr = NULL;
3936 its->state = TS_DEAD;
3939 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
3940 TCGOpcode opc = op->opc;
3941 const TCGOpDef *def = &tcg_op_defs[opc];
3942 TCGLifeData arg_life = op->life;
3943 int nb_iargs, nb_oargs, call_flags;
3944 TCGTemp *arg_ts, *dir_ts;
3946 if (opc == INDEX_op_call) {
3947 nb_oargs = TCGOP_CALLO(op);
3948 nb_iargs = TCGOP_CALLI(op);
3949 call_flags = tcg_call_flags(op);
3950 } else {
3951 nb_iargs = def->nb_iargs;
3952 nb_oargs = def->nb_oargs;
3954 /* Set flags similar to how calls require. */
3955 if (def->flags & TCG_OPF_COND_BRANCH) {
3956 /* Like reading globals: sync_globals */
3957 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3958 } else if (def->flags & TCG_OPF_BB_END) {
3959 /* Like writing globals: save_globals */
3960 call_flags = 0;
3961 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3962 /* Like reading globals: sync_globals */
3963 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3964 } else {
3965 /* No effect on globals. */
3966 call_flags = (TCG_CALL_NO_READ_GLOBALS |
3967 TCG_CALL_NO_WRITE_GLOBALS);
3971 /* Make sure that input arguments are available. */
3972 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3973 arg_ts = arg_temp(op->args[i]);
3974 dir_ts = arg_ts->state_ptr;
3975 if (dir_ts && arg_ts->state == TS_DEAD) {
3976 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
3977 ? INDEX_op_ld_i32
3978 : INDEX_op_ld_i64);
3979 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
3981 lop->args[0] = temp_arg(dir_ts);
3982 lop->args[1] = temp_arg(arg_ts->mem_base);
3983 lop->args[2] = arg_ts->mem_offset;
3985 /* Loaded, but synced with memory. */
3986 arg_ts->state = TS_MEM;
3990 /* Perform input replacement, and mark inputs that became dead.
3991 No action is required except keeping temp_state up to date
3992 so that we reload when needed. */
3993 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3994 arg_ts = arg_temp(op->args[i]);
3995 dir_ts = arg_ts->state_ptr;
3996 if (dir_ts) {
3997 op->args[i] = temp_arg(dir_ts);
3998 changes = true;
3999 if (IS_DEAD_ARG(i)) {
4000 arg_ts->state = TS_DEAD;
4005 /* Liveness analysis should ensure that the following are
4006 all correct, for call sites and basic block end points. */
4007 if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
4008 /* Nothing to do */
4009 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
4010 for (i = 0; i < nb_globals; ++i) {
4011 /* Liveness should see that globals are synced back,
4012 that is, either TS_DEAD or TS_MEM. */
4013 arg_ts = &s->temps[i];
4014 tcg_debug_assert(arg_ts->state_ptr == 0
4015 || arg_ts->state != 0);
4017 } else {
4018 for (i = 0; i < nb_globals; ++i) {
4019 /* Liveness should see that globals are saved back,
4020 that is, TS_DEAD, waiting to be reloaded. */
4021 arg_ts = &s->temps[i];
4022 tcg_debug_assert(arg_ts->state_ptr == 0
4023 || arg_ts->state == TS_DEAD);
4027 /* Outputs become available. */
4028 if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
4029 arg_ts = arg_temp(op->args[0]);
4030 dir_ts = arg_ts->state_ptr;
4031 if (dir_ts) {
4032 op->args[0] = temp_arg(dir_ts);
4033 changes = true;
4035 /* The output is now live and modified. */
4036 arg_ts->state = 0;
4038 if (NEED_SYNC_ARG(0)) {
4039 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
4040 ? INDEX_op_st_i32
4041 : INDEX_op_st_i64);
4042 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
4043 TCGTemp *out_ts = dir_ts;
4045 if (IS_DEAD_ARG(0)) {
4046 out_ts = arg_temp(op->args[1]);
4047 arg_ts->state = TS_DEAD;
4048 tcg_op_remove(s, op);
4049 } else {
4050 arg_ts->state = TS_MEM;
4053 sop->args[0] = temp_arg(out_ts);
4054 sop->args[1] = temp_arg(arg_ts->mem_base);
4055 sop->args[2] = arg_ts->mem_offset;
4056 } else {
4057 tcg_debug_assert(!IS_DEAD_ARG(0));
4060 } else {
4061 for (i = 0; i < nb_oargs; i++) {
4062 arg_ts = arg_temp(op->args[i]);
4063 dir_ts = arg_ts->state_ptr;
4064 if (!dir_ts) {
4065 continue;
4067 op->args[i] = temp_arg(dir_ts);
4068 changes = true;
4070 /* The output is now live and modified. */
4071 arg_ts->state = 0;
4073 /* Sync outputs upon their last write. */
4074 if (NEED_SYNC_ARG(i)) {
4075 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
4076 ? INDEX_op_st_i32
4077 : INDEX_op_st_i64);
4078 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
4080 sop->args[0] = temp_arg(dir_ts);
4081 sop->args[1] = temp_arg(arg_ts->mem_base);
4082 sop->args[2] = arg_ts->mem_offset;
4084 arg_ts->state = TS_MEM;
4086 /* Drop outputs that are dead. */
4087 if (IS_DEAD_ARG(i)) {
4088 arg_ts->state = TS_DEAD;
4094 return changes;
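/*
 * Before/after sketch (hypothetical op stream) of the rewriting above,
 * for an indirect global g with shadow temp dts and parent env_ptr:
 *
 *   add_i32 g, g, t0
 *
 * becomes
 *
 *   ld_i32  dts, env_ptr, $off     <- inserted: input made available
 *   add_i32 dts, dts, t0           <- arguments replaced by the shadow
 *   st_i32  dts, env_ptr, $off     <- inserted at last write (sync)
 */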
4097 static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
4099 intptr_t off;
4100 int size, align;
4102 /* When allocating an object, look at the full type. */
4103 size = tcg_type_size(ts->base_type);
4104 switch (ts->base_type) {
4105 case TCG_TYPE_I32:
4106 align = 4;
4107 break;
4108 case TCG_TYPE_I64:
4109 case TCG_TYPE_V64:
4110 align = 8;
4111 break;
4112 case TCG_TYPE_I128:
4113 case TCG_TYPE_V128:
4114 case TCG_TYPE_V256:
4116 * Note that we do not require aligned storage for V256,
4117 * and that we provide alignment for I128 to match V128,
4118 * even if that's above what the host ABI requires.
4120 align = 16;
4121 break;
4122 default:
4123 g_assert_not_reached();
4127 * Assume the stack is sufficiently aligned.
4128 * This affects e.g. ARM NEON, where we have 8 byte stack alignment
4129 * but do not require 16 byte vector alignment. This seems slightly
4130 * easier than fully parameterizing the above switch statement.
4132 align = MIN(TCG_TARGET_STACK_ALIGN, align);
4133 off = ROUND_UP(s->current_frame_offset, align);
4135 /* If we've exhausted the stack frame, restart with a smaller TB. */
4136 if (off + size > s->frame_end) {
4137 tcg_raise_tb_overflow(s);
4139 s->current_frame_offset = off + size;
4140 #if defined(__sparc__)
4141 off += TCG_TARGET_STACK_BIAS;
4142 #endif
4144 /* If the object was subdivided, assign memory to all the parts. */
4145 if (ts->base_type != ts->type) {
4146 int part_size = tcg_type_size(ts->type);
4147 int part_count = size / part_size;
4150 * Each part is allocated sequentially in tcg_temp_new_internal.
4151 * Jump back to the first part by subtracting the current index.
4153 ts -= ts->temp_subindex;
4154 for (int i = 0; i < part_count; ++i) {
4155 ts[i].mem_offset = off + i * part_size;
4156 ts[i].mem_base = s->frame_temp;
4157 ts[i].mem_allocated = 1;
4159 } else {
4160 ts->mem_offset = off;
4161 ts->mem_base = s->frame_temp;
4162 ts->mem_allocated = 1;
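/*
 * Illustrative sketch, not part of tcg.c: the round-up-and-bump frame
 * allocation performed above.  EX_ROUND_UP mirrors QEMU's ROUND_UP for
 * power-of-two alignments; the offsets and sizes are invented, and the
 * MIN() against the stack alignment is omitted for brevity.
 */
#include <assert.h>
#include <stdint.h>

#define EX_ROUND_UP(n, a)  (((n) + (a) - 1) & -(intptr_t)(a))

static intptr_t ex_alloc_slot(intptr_t *frame_off, int size, int align)
{
    intptr_t off = EX_ROUND_UP(*frame_off, align);
    *frame_off = off + size;            /* bump the running offset */
    return off;
}

int main(void)
{
    intptr_t frame = 4;                      /* 4 bytes already in use */
    assert(ex_alloc_slot(&frame, 8, 8) == 8);    /* padded 4 -> 8 */
    assert(ex_alloc_slot(&frame, 16, 16) == 16); /* already aligned */
    assert(frame == 32);
    return 0;
}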
4166 /* Assign @reg to @ts, and update reg_to_temp[]. */
4167 static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
4169 if (ts->val_type == TEMP_VAL_REG) {
4170 TCGReg old = ts->reg;
4171 tcg_debug_assert(s->reg_to_temp[old] == ts);
4172 if (old == reg) {
4173 return;
4175 s->reg_to_temp[old] = NULL;
4177 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
4178 s->reg_to_temp[reg] = ts;
4179 ts->val_type = TEMP_VAL_REG;
4180 ts->reg = reg;
4183 /* Assign a non-register value type to @ts, and update reg_to_temp[]. */
4184 static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
4186 tcg_debug_assert(type != TEMP_VAL_REG);
4187 if (ts->val_type == TEMP_VAL_REG) {
4188 TCGReg reg = ts->reg;
4189 tcg_debug_assert(s->reg_to_temp[reg] == ts);
4190 s->reg_to_temp[reg] = NULL;
4192 ts->val_type = type;
4195 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
4197 /* Mark a temporary as free or dead. If 'free_or_dead' is negative,
4198 mark it free; otherwise mark it dead. */
4199 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
4201 TCGTempVal new_type;
4203 switch (ts->kind) {
4204 case TEMP_FIXED:
4205 return;
4206 case TEMP_GLOBAL:
4207 case TEMP_TB:
4208 new_type = TEMP_VAL_MEM;
4209 break;
4210 case TEMP_EBB:
4211 new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
4212 break;
4213 case TEMP_CONST:
4214 new_type = TEMP_VAL_CONST;
4215 break;
4216 default:
4217 g_assert_not_reached();
4219 set_temp_val_nonreg(s, ts, new_type);
4222 /* Mark a temporary as dead. */
4223 static inline void temp_dead(TCGContext *s, TCGTemp *ts)
4225 temp_free_or_dead(s, ts, 1);
4228 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
4229 register needs to be allocated to store a constant. If 'free_or_dead'
4230 is non-zero, subsequently release the temporary; if it is positive, the
4231 temp is dead; if it is negative, the temp is free. */
4232 static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
4233 TCGRegSet preferred_regs, int free_or_dead)
4235 if (!temp_readonly(ts) && !ts->mem_coherent) {
4236 if (!ts->mem_allocated) {
4237 temp_allocate_frame(s, ts);
4239 switch (ts->val_type) {
4240 case TEMP_VAL_CONST:
4241 /* If we're going to free the temp immediately, then we won't
4242 require it later in a register, so attempt to store the
4243 constant to memory directly. */
4244 if (free_or_dead
4245 && tcg_out_sti(s, ts->type, ts->val,
4246 ts->mem_base->reg, ts->mem_offset)) {
4247 break;
4249 temp_load(s, ts, tcg_target_available_regs[ts->type],
4250 allocated_regs, preferred_regs);
4251 /* fallthrough */
4253 case TEMP_VAL_REG:
4254 tcg_out_st(s, ts->type, ts->reg,
4255 ts->mem_base->reg, ts->mem_offset);
4256 break;
4258 case TEMP_VAL_MEM:
4259 break;
4261 case TEMP_VAL_DEAD:
4262 default:
4263 g_assert_not_reached();
4265 ts->mem_coherent = 1;
4267 if (free_or_dead) {
4268 temp_free_or_dead(s, ts, free_or_dead);
4272 /* free register 'reg' by spilling the corresponding temporary if necessary */
4273 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
4275 TCGTemp *ts = s->reg_to_temp[reg];
4276 if (ts != NULL) {
4277 temp_sync(s, ts, allocated_regs, 0, -1);
4282 * tcg_reg_alloc:
4283 * @required_regs: Set of registers in which we must allocate.
4284 * @allocated_regs: Set of registers which must be avoided.
4285 * @preferred_regs: Set of registers we should prefer.
4286 * @rev: True if we search the registers in "indirect" order.
4288 * The allocated register must be in @required_regs & ~@allocated_regs,
4289 * but if we can put it in @preferred_regs we may save a move later.
4291 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
4292 TCGRegSet allocated_regs,
4293 TCGRegSet preferred_regs, bool rev)
4295 int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
4296 TCGRegSet reg_ct[2];
4297 const int *order;
4299 reg_ct[1] = required_regs & ~allocated_regs;
4300 tcg_debug_assert(reg_ct[1] != 0);
4301 reg_ct[0] = reg_ct[1] & preferred_regs;
4303 /* Skip the preferred_regs option if it cannot be satisfied,
4304 or if the preference made no difference. */
4305 f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
4307 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
4309 /* Try free registers, preferences first. */
4310 for (j = f; j < 2; j++) {
4311 TCGRegSet set = reg_ct[j];
4313 if (tcg_regset_single(set)) {
4314 /* One register in the set. */
4315 TCGReg reg = tcg_regset_first(set);
4316 if (s->reg_to_temp[reg] == NULL) {
4317 return reg;
4319 } else {
4320 for (i = 0; i < n; i++) {
4321 TCGReg reg = order[i];
4322 if (s->reg_to_temp[reg] == NULL &&
4323 tcg_regset_test_reg(set, reg)) {
4324 return reg;
4330 /* We must spill something. */
4331 for (j = f; j < 2; j++) {
4332 TCGRegSet set = reg_ct[j];
4334 if (tcg_regset_single(set)) {
4335 /* One register in the set. */
4336 TCGReg reg = tcg_regset_first(set);
4337 tcg_reg_free(s, reg, allocated_regs);
4338 return reg;
4339 } else {
4340 for (i = 0; i < n; i++) {
4341 TCGReg reg = order[i];
4342 if (tcg_regset_test_reg(set, reg)) {
4343 tcg_reg_free(s, reg, allocated_regs);
4344 return reg;
4350 g_assert_not_reached();
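/*
 * Illustrative sketch, not part of tcg.c: the search order implemented
 * by tcg_reg_alloc above, on a toy 8-register machine with bitmask
 * register sets.  Free registers are tried before any spill, and the
 * preferred subset (pass 0) before the full candidate set (pass 1).
 * All names and values here are invented for the example.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static int ex_reg_alloc(uint8_t required, uint8_t preferred,
                        const bool *in_use)
{
    uint8_t reg_ct[2] = { (uint8_t)(required & preferred), required };
    /* Skip pass 0 if it is empty or identical to pass 1. */
    int f = (reg_ct[0] == 0 || reg_ct[0] == reg_ct[1]);

    for (int j = f; j < 2; j++) {       /* free registers first */
        for (int r = 0; r < 8; r++) {
            if ((reg_ct[j] >> r & 1) && !in_use[r]) {
                return r;
            }
        }
    }
    for (int r = 0; r < 8; r++) {       /* otherwise pick a victim */
        if (reg_ct[1] >> r & 1) {
            return r;                   /* caller would spill it here */
        }
    }
    return -1;                          /* unreachable if required != 0 */
}

int main(void)
{
    bool in_use[8] = { [0] = true, [2] = true };
    /* r0 busy; prefer {r0,r1} within required {r0..r3}: picks r1. */
    assert(ex_reg_alloc(0x0f, 0x03, in_use) == 1);
    return 0;
}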
4353 static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
4354 TCGRegSet allocated_regs,
4355 TCGRegSet preferred_regs, bool rev)
4357 int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
4358 TCGRegSet reg_ct[2];
4359 const int *order;
4361 /* Ensure that if I is not in allocated_regs, I+1 is not either. */
4362 reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
4363 tcg_debug_assert(reg_ct[1] != 0);
4364 reg_ct[0] = reg_ct[1] & preferred_regs;
4366 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
4369 * Skip the preferred_regs option if it cannot be satisfied,
4370 * or if the preference made no difference.
4372 k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
4375 * Minimize the number of flushes by looking for 2 free registers first,
4376 * then a single flush, then two flushes.
4378 for (fmin = 2; fmin >= 0; fmin--) {
4379 for (j = k; j < 2; j++) {
4380 TCGRegSet set = reg_ct[j];
4382 for (i = 0; i < n; i++) {
4383 TCGReg reg = order[i];
4385 if (tcg_regset_test_reg(set, reg)) {
4386 int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
4387 if (f >= fmin) {
4388 tcg_reg_free(s, reg, allocated_regs);
4389 tcg_reg_free(s, reg + 1, allocated_regs);
4390 return reg;
4396 g_assert_not_reached();
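/*
 * Illustrative sketch, not part of tcg.c: the shifted-mask trick used
 * by tcg_reg_alloc_pair above.  Clearing bit I whenever I or I+1 is
 * allocated leaves only base registers whose whole pair is free.
 * Toy 8-register set; the values are invented.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint8_t required = 0xff;           /* any of r0..r7 */
    uint8_t allocated = 0x04;          /* r2 is taken */
    uint8_t bases = required & (uint8_t)~(allocated | (allocated >> 1));

    /* r1 is rejected (its partner r2 is busy) and so is r2 itself. */
    assert(bases == 0xf9);
    return 0;
}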
4399 /* Make sure the temporary is in a register. If needed, allocate the register
4400 from DESIRED while avoiding ALLOCATED. */
4401 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
4402 TCGRegSet allocated_regs, TCGRegSet preferred_regs)
4404 TCGReg reg;
4406 switch (ts->val_type) {
4407 case TEMP_VAL_REG:
4408 return;
4409 case TEMP_VAL_CONST:
4410 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
4411 preferred_regs, ts->indirect_base);
4412 if (ts->type <= TCG_TYPE_I64) {
4413 tcg_out_movi(s, ts->type, reg, ts->val);
4414 } else {
4415 uint64_t val = ts->val;
4416 MemOp vece = MO_64;
4419 * Find the minimal vector element that matches the constant.
4420 * The targets will, in general, have to do this search anyway,
4421 * so do it generically here.
4423 if (val == dup_const(MO_8, val)) {
4424 vece = MO_8;
4425 } else if (val == dup_const(MO_16, val)) {
4426 vece = MO_16;
4427 } else if (val == dup_const(MO_32, val)) {
4428 vece = MO_32;
4431 tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
4433 ts->mem_coherent = 0;
4434 break;
4435 case TEMP_VAL_MEM:
4436 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
4437 preferred_regs, ts->indirect_base);
4438 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
4439 ts->mem_coherent = 1;
4440 break;
4441 case TEMP_VAL_DEAD:
4442 default:
4443 g_assert_not_reached();
4445 set_temp_val_reg(s, ts, reg);
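/*
 * Illustrative sketch, not part of tcg.c: what the dup_const tests above
 * establish.  A 64-bit constant that equals its own replication at a
 * narrower element size can be materialized with that smaller element.
 * ex_dup_const restates the replication locally; vece_bits is the
 * element width in bits (8/16/32/64).
 */
#include <assert.h>
#include <stdint.h>

static uint64_t ex_dup_const(unsigned vece_bits, uint64_t val)
{
    uint64_t r = val & (vece_bits == 64 ? ~0ull : (1ull << vece_bits) - 1);
    for (unsigned b = vece_bits; b < 64; b *= 2) {
        r |= r << b;                   /* double the replicated width */
    }
    return r;
}

int main(void)
{
    assert(ex_dup_const(8, 0x7f7f7f7f7f7f7f7full) == 0x7f7f7f7f7f7f7f7full);
    assert(ex_dup_const(16, 0x0001000100010001ull) == 0x0001000100010001ull);
    assert(ex_dup_const(8, 0x0001000100010001ull) != 0x0001000100010001ull);
    return 0;
}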
4448 /* Save a temporary to memory. 'allocated_regs' is used in case a
4449 temporary register needs to be allocated to store a constant. */
4450 static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
4452 /* The liveness analysis already ensures that globals are back
4453 in memory. Keep a tcg_debug_assert for safety. */
4454 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
4457 /* save globals to their canonical location and assume they can be
4458 modified by the following code. 'allocated_regs' is used in case a
4459 temporary register needs to be allocated to store a constant. */
4460 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
4462 int i, n;
4464 for (i = 0, n = s->nb_globals; i < n; i++) {
4465 temp_save(s, &s->temps[i], allocated_regs);
4469 /* sync globals to their canonical location and assume they can be
4470 read by the following code. 'allocated_regs' is used in case a
4471 temporary register needs to be allocated to store a constant. */
4472 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
4474 int i, n;
4476 for (i = 0, n = s->nb_globals; i < n; i++) {
4477 TCGTemp *ts = &s->temps[i];
4478 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
4479 || ts->kind == TEMP_FIXED
4480 || ts->mem_coherent);
4484 /* at the end of a basic block, we assume all temporaries are dead and
4485 all globals are stored at their canonical location. */
4486 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
4488 int i;
4490 for (i = s->nb_globals; i < s->nb_temps; i++) {
4491 TCGTemp *ts = &s->temps[i];
4493 switch (ts->kind) {
4494 case TEMP_TB:
4495 temp_save(s, ts, allocated_regs);
4496 break;
4497 case TEMP_EBB:
4498 /* The liveness analysis already ensures that temps are dead.
4499 Keep a tcg_debug_assert for safety. */
4500 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
4501 break;
4502 case TEMP_CONST:
4503 /* Similarly, we should have freed any allocated register. */
4504 tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
4505 break;
4506 default:
4507 g_assert_not_reached();
4511 save_globals(s, allocated_regs);
4515 * At a conditional branch, we assume all temporaries are dead unless
4516 * explicitly live-across-conditional-branch; all globals and local
4517 * temps are synced to their location.
4519 static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
4521 sync_globals(s, allocated_regs);
4523 for (int i = s->nb_globals; i < s->nb_temps; i++) {
4524 TCGTemp *ts = &s->temps[i];
4526 * The liveness analysis already ensures that temps are dead.
4527 * Keep tcg_debug_asserts for safety.
4529 switch (ts->kind) {
4530 case TEMP_TB:
4531 tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
4532 break;
4533 case TEMP_EBB:
4534 case TEMP_CONST:
4535 break;
4536 default:
4537 g_assert_not_reached();
4543 * Specialized code generation for INDEX_op_mov_* with a constant.
4545 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
4546 tcg_target_ulong val, TCGLifeData arg_life,
4547 TCGRegSet preferred_regs)
4549 /* ENV should not be modified. */
4550 tcg_debug_assert(!temp_readonly(ots));
4552 /* The movi is not explicitly generated here. */
4553 set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
4554 ots->val = val;
4555 ots->mem_coherent = 0;
4556 if (NEED_SYNC_ARG(0)) {
4557 temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
4558 } else if (IS_DEAD_ARG(0)) {
4559 temp_dead(s, ots);
4564 * Specialized code generation for INDEX_op_mov_*.
4566 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
4568 const TCGLifeData arg_life = op->life;
4569 TCGRegSet allocated_regs, preferred_regs;
4570 TCGTemp *ts, *ots;
4571 TCGType otype, itype;
4572 TCGReg oreg, ireg;
4574 allocated_regs = s->reserved_regs;
4575 preferred_regs = output_pref(op, 0);
4576 ots = arg_temp(op->args[0]);
4577 ts = arg_temp(op->args[1]);
4579 /* ENV should not be modified. */
4580 tcg_debug_assert(!temp_readonly(ots));
4582 /* Note that otype != itype for no-op truncation. */
4583 otype = ots->type;
4584 itype = ts->type;
4586 if (ts->val_type == TEMP_VAL_CONST) {
4587 /* propagate constant or generate sti */
4588 tcg_target_ulong val = ts->val;
4589 if (IS_DEAD_ARG(1)) {
4590 temp_dead(s, ts);
4592 tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
4593 return;
4596 /* If the source value is in memory we're going to be forced
4597 to have it in a register in order to perform the copy. Copy
4598 the SOURCE value into its own register first, so that we
4599 don't have to reload SOURCE the next time it is used. */
4600 if (ts->val_type == TEMP_VAL_MEM) {
4601 temp_load(s, ts, tcg_target_available_regs[itype],
4602 allocated_regs, preferred_regs);
4604 tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
4605 ireg = ts->reg;
4607 if (IS_DEAD_ARG(0)) {
4608 /* mov to a non-saved dead register makes no sense (even with
4609 liveness analysis disabled). */
4610 tcg_debug_assert(NEED_SYNC_ARG(0));
4611 if (!ots->mem_allocated) {
4612 temp_allocate_frame(s, ots);
4614 tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
4615 if (IS_DEAD_ARG(1)) {
4616 temp_dead(s, ts);
4618 temp_dead(s, ots);
4619 return;
4622 if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
4624 * The mov can be suppressed. Kill input first, so that it
4625 * is unlinked from reg_to_temp, then set the output to the
4626 * reg that we saved from the input.
4628 temp_dead(s, ts);
4629 oreg = ireg;
4630 } else {
4631 if (ots->val_type == TEMP_VAL_REG) {
4632 oreg = ots->reg;
4633 } else {
4634 /* Make sure to not spill the input register during allocation. */
4635 oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
4636 allocated_regs | ((TCGRegSet)1 << ireg),
4637 preferred_regs, ots->indirect_base);
4639 if (!tcg_out_mov(s, otype, oreg, ireg)) {
4641 * Cross register class move not supported.
4642 * Store the source register into the destination slot
4643 * and leave the destination temp as TEMP_VAL_MEM.
4645 assert(!temp_readonly(ots));
4646 if (!ts->mem_allocated) {
4647 temp_allocate_frame(s, ots);
4649 tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
4650 set_temp_val_nonreg(s, ts, TEMP_VAL_MEM);
4651 ots->mem_coherent = 1;
4652 return;
4655 set_temp_val_reg(s, ots, oreg);
4656 ots->mem_coherent = 0;
4658 if (NEED_SYNC_ARG(0)) {
4659 temp_sync(s, ots, allocated_regs, 0, 0);
4664 * Specialized code generation for INDEX_op_dup_vec.
4666 static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
4668 const TCGLifeData arg_life = op->life;
4669 TCGRegSet dup_out_regs, dup_in_regs;
4670 TCGTemp *its, *ots;
4671 TCGType itype, vtype;
4672 unsigned vece;
4673 int lowpart_ofs;
4674 bool ok;
4676 ots = arg_temp(op->args[0]);
4677 its = arg_temp(op->args[1]);
4679 /* ENV should not be modified. */
4680 tcg_debug_assert(!temp_readonly(ots));
4682 itype = its->type;
4683 vece = TCGOP_VECE(op);
4684 vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
4686 if (its->val_type == TEMP_VAL_CONST) {
4687 /* Propagate constant via movi -> dupi. */
4688 tcg_target_ulong val = its->val;
4689 if (IS_DEAD_ARG(1)) {
4690 temp_dead(s, its);
4692 tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
4693 return;
4696 dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
4697 dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
4699 /* Allocate the output register now. */
4700 if (ots->val_type != TEMP_VAL_REG) {
4701 TCGRegSet allocated_regs = s->reserved_regs;
4702 TCGReg oreg;
4704 if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
4705 /* Make sure to not spill the input register. */
4706 tcg_regset_set_reg(allocated_regs, its->reg);
4708 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
4709 output_pref(op, 0), ots->indirect_base);
4710 set_temp_val_reg(s, ots, oreg);
4713 switch (its->val_type) {
4714 case TEMP_VAL_REG:
4716 * The dup constraints must be broad, covering all possible VECE.
4717 * However, tcg_out_dup_vec() gets to see the VECE and we allow it
4718 * to fail, indicating that extra moves are required for that case.
4720 if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
4721 if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
4722 goto done;
4724 /* Try again from memory or a vector input register. */
4726 if (!its->mem_coherent) {
4728 * The input register is not synced, and so an extra store
4729 * would be required to use memory. Attempt an integer-vector
4730 * register move first. We do not have a TCGRegSet for this.
4732 if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
4733 break;
4735 /* Sync the temp back to its slot and load from there. */
4736 temp_sync(s, its, s->reserved_regs, 0, 0);
4738 /* fall through */
4740 case TEMP_VAL_MEM:
4741 lowpart_ofs = 0;
4742 if (HOST_BIG_ENDIAN) {
4743 lowpart_ofs = tcg_type_size(itype) - (1 << vece);
4745 if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
4746 its->mem_offset + lowpart_ofs)) {
4747 goto done;
4749 /* Load the input into the destination vector register. */
4750 tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
4751 break;
4753 default:
4754 g_assert_not_reached();
4757 /* We now have a vector input register, so dup must succeed. */
4758 ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
4759 tcg_debug_assert(ok);
4761 done:
4762 ots->mem_coherent = 0;
4763 if (IS_DEAD_ARG(1)) {
4764 temp_dead(s, its);
4766 if (NEED_SYNC_ARG(0)) {
4767 temp_sync(s, ots, s->reserved_regs, 0, 0);
4769 if (IS_DEAD_ARG(0)) {
4770 temp_dead(s, ots);
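/*
 * Illustrative sketch, not part of tcg.c: the lowpart_ofs computation
 * used above.  For an 8-byte (I64) input and a 2-byte (MO_16) element,
 * the least significant element sits at byte offset 6 on a big-endian
 * host and at offset 0 on a little-endian one.  Values are restated
 * locally for the example.
 */
#include <assert.h>

int main(void)
{
    int type_size = 8;             /* tcg_type_size(TCG_TYPE_I64) */
    int vece = 1;                  /* MO_16: element of 1 << 1 bytes */
    int host_big_endian = 1;       /* pretend we are big-endian */
    int lowpart_ofs = host_big_endian ? type_size - (1 << vece) : 0;

    assert(lowpart_ofs == 6);
    return 0;
}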
4774 static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
4776 const TCGLifeData arg_life = op->life;
4777 const TCGOpDef * const def = &tcg_op_defs[op->opc];
4778 TCGRegSet i_allocated_regs;
4779 TCGRegSet o_allocated_regs;
4780 int i, k, nb_iargs, nb_oargs;
4781 TCGReg reg;
4782 TCGArg arg;
4783 const TCGArgConstraint *arg_ct;
4784 TCGTemp *ts;
4785 TCGArg new_args[TCG_MAX_OP_ARGS];
4786 int const_args[TCG_MAX_OP_ARGS];
4788 nb_oargs = def->nb_oargs;
4789 nb_iargs = def->nb_iargs;
4791 /* copy constants */
4792 memcpy(new_args + nb_oargs + nb_iargs,
4793 op->args + nb_oargs + nb_iargs,
4794 sizeof(TCGArg) * def->nb_cargs);
4796 i_allocated_regs = s->reserved_regs;
4797 o_allocated_regs = s->reserved_regs;
4799 /* satisfy input constraints */
4800 for (k = 0; k < nb_iargs; k++) {
4801 TCGRegSet i_preferred_regs, i_required_regs;
4802 bool allocate_new_reg, copyto_new_reg;
4803 TCGTemp *ts2;
4804 int i1, i2;
4806 i = def->args_ct[nb_oargs + k].sort_index;
4807 arg = op->args[i];
4808 arg_ct = &def->args_ct[i];
4809 ts = arg_temp(arg);
4811 if (ts->val_type == TEMP_VAL_CONST
4812 && tcg_target_const_match(ts->val, ts->type, arg_ct->ct, TCGOP_VECE(op))) {
4813 /* constant is OK for instruction */
4814 const_args[i] = 1;
4815 new_args[i] = ts->val;
4816 continue;
4819 reg = ts->reg;
4820 i_preferred_regs = 0;
4821 i_required_regs = arg_ct->regs;
4822 allocate_new_reg = false;
4823 copyto_new_reg = false;
4825 switch (arg_ct->pair) {
4826 case 0: /* not paired */
4827 if (arg_ct->ialias) {
4828 i_preferred_regs = output_pref(op, arg_ct->alias_index);
4831 * If the input is readonly, then it cannot also be an
4832 * output and aliased to itself. If the input is not
4833 * dead after the instruction, we must allocate a new
4834 * register and move it.
4836 if (temp_readonly(ts) || !IS_DEAD_ARG(i)
4837 || def->args_ct[arg_ct->alias_index].newreg) {
4838 allocate_new_reg = true;
4839 } else if (ts->val_type == TEMP_VAL_REG) {
4841 * Check if the current register has already been
4842 * allocated for another input.
4844 allocate_new_reg =
4845 tcg_regset_test_reg(i_allocated_regs, reg);
4848 if (!allocate_new_reg) {
4849 temp_load(s, ts, i_required_regs, i_allocated_regs,
4850 i_preferred_regs);
4851 reg = ts->reg;
4852 allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
4854 if (allocate_new_reg) {
4856 * Allocate a new register matching the constraint
4857 * and move the temporary register into it.
4859 temp_load(s, ts, tcg_target_available_regs[ts->type],
4860 i_allocated_regs, 0);
4861 reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
4862 i_preferred_regs, ts->indirect_base);
4863 copyto_new_reg = true;
4865 break;
4867 case 1:
4868 /* First of an input pair; if i1 == i2, the second is an output. */
4869 i1 = i;
4870 i2 = arg_ct->pair_index;
4871 ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;
4874 * It is easier to default to allocating a new pair
4875 * and to identify a few cases where it's not required.
4877 if (arg_ct->ialias) {
4878 i_preferred_regs = output_pref(op, arg_ct->alias_index);
4879 if (IS_DEAD_ARG(i1) &&
4880 IS_DEAD_ARG(i2) &&
4881 !temp_readonly(ts) &&
4882 ts->val_type == TEMP_VAL_REG &&
4883 ts->reg < TCG_TARGET_NB_REGS - 1 &&
4884 tcg_regset_test_reg(i_required_regs, reg) &&
4885 !tcg_regset_test_reg(i_allocated_regs, reg) &&
4886 !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
4887 (ts2
4888 ? ts2->val_type == TEMP_VAL_REG &&
4889 ts2->reg == reg + 1 &&
4890 !temp_readonly(ts2)
4891 : s->reg_to_temp[reg + 1] == NULL)) {
4892 break;
4894 } else {
4895 /* Without aliasing, the pair must also be an input. */
4896 tcg_debug_assert(ts2);
4897 if (ts->val_type == TEMP_VAL_REG &&
4898 ts2->val_type == TEMP_VAL_REG &&
4899 ts2->reg == reg + 1 &&
4900 tcg_regset_test_reg(i_required_regs, reg)) {
4901 break;
4904 reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
4905 0, ts->indirect_base);
4906 goto do_pair;
4908 case 2: /* pair second */
4909 reg = new_args[arg_ct->pair_index] + 1;
4910 goto do_pair;
4912 case 3: /* ialias with second output, no first input */
4913 tcg_debug_assert(arg_ct->ialias);
4914 i_preferred_regs = output_pref(op, arg_ct->alias_index);
4916 if (IS_DEAD_ARG(i) &&
4917 !temp_readonly(ts) &&
4918 ts->val_type == TEMP_VAL_REG &&
4919 reg > 0 &&
4920 s->reg_to_temp[reg - 1] == NULL &&
4921 tcg_regset_test_reg(i_required_regs, reg) &&
4922 !tcg_regset_test_reg(i_allocated_regs, reg) &&
4923 !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
4924 tcg_regset_set_reg(i_allocated_regs, reg - 1);
4925 break;
4927 reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
4928 i_allocated_regs, 0,
4929 ts->indirect_base);
4930 tcg_regset_set_reg(i_allocated_regs, reg);
4931 reg += 1;
4932 goto do_pair;
4934 do_pair:
4936 * If an aliased input is not dead after the instruction,
4937 * we must allocate a new register and move it.
4939 if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
4940 TCGRegSet t_allocated_regs = i_allocated_regs;
4943 * Because of the alias, and the continued life, make sure
4944 * that the temp is somewhere *other* than the reg pair,
4945 * and we get a copy in reg.
4947 tcg_regset_set_reg(t_allocated_regs, reg);
4948 tcg_regset_set_reg(t_allocated_regs, reg + 1);
4949 if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
4950 /* If ts was already in reg, copy it somewhere else. */
4951 TCGReg nr;
4952 bool ok;
4954 tcg_debug_assert(ts->kind != TEMP_FIXED);
4955 nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
4956 t_allocated_regs, 0, ts->indirect_base);
4957 ok = tcg_out_mov(s, ts->type, nr, reg);
4958 tcg_debug_assert(ok);
4960 set_temp_val_reg(s, ts, nr);
4961 } else {
4962 temp_load(s, ts, tcg_target_available_regs[ts->type],
4963 t_allocated_regs, 0);
4964 copyto_new_reg = true;
4966 } else {
4967 /* Preferably allocate to reg, otherwise copy. */
4968 i_required_regs = (TCGRegSet)1 << reg;
4969 temp_load(s, ts, i_required_regs, i_allocated_regs,
4970 i_preferred_regs);
4971 copyto_new_reg = ts->reg != reg;
4973 break;
4975 default:
4976 g_assert_not_reached();
4979 if (copyto_new_reg) {
4980 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
4982 * Cross register class move not supported. Sync the
4983 * temp back to its slot and load from there.
4985 temp_sync(s, ts, i_allocated_regs, 0, 0);
4986 tcg_out_ld(s, ts->type, reg,
4987 ts->mem_base->reg, ts->mem_offset);
4990 new_args[i] = reg;
4991 const_args[i] = 0;
4992 tcg_regset_set_reg(i_allocated_regs, reg);
4995 /* mark dead temporaries and free the associated registers */
4996 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
4997 if (IS_DEAD_ARG(i)) {
4998 temp_dead(s, arg_temp(op->args[i]));
5002 if (def->flags & TCG_OPF_COND_BRANCH) {
5003 tcg_reg_alloc_cbranch(s, i_allocated_regs);
5004 } else if (def->flags & TCG_OPF_BB_END) {
5005 tcg_reg_alloc_bb_end(s, i_allocated_regs);
5006 } else {
5007 if (def->flags & TCG_OPF_CALL_CLOBBER) {
5008 /* XXX: permit generic clobber register list ? */
5009 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
5010 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
5011 tcg_reg_free(s, i, i_allocated_regs);
5015 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
5016 /* sync globals if the op has side effects and might trigger
5017 an exception. */
5018 sync_globals(s, i_allocated_regs);
5021 /* satisfy the output constraints */
5022 for (k = 0; k < nb_oargs; k++) {
5023 i = def->args_ct[k].sort_index;
5024 arg = op->args[i];
5025 arg_ct = &def->args_ct[i];
5026 ts = arg_temp(arg);
5028 /* ENV should not be modified. */
5029 tcg_debug_assert(!temp_readonly(ts));
5031 switch (arg_ct->pair) {
5032 case 0: /* not paired */
5033 if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
5034 reg = new_args[arg_ct->alias_index];
5035 } else if (arg_ct->newreg) {
5036 reg = tcg_reg_alloc(s, arg_ct->regs,
5037 i_allocated_regs | o_allocated_regs,
5038 output_pref(op, k), ts->indirect_base);
5039 } else {
5040 reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
5041 output_pref(op, k), ts->indirect_base);
5043 break;
5045 case 1: /* first of pair */
5046 if (arg_ct->oalias) {
5047 reg = new_args[arg_ct->alias_index];
5048 } else if (arg_ct->newreg) {
5049 reg = tcg_reg_alloc_pair(s, arg_ct->regs,
5050 i_allocated_regs | o_allocated_regs,
5051 output_pref(op, k),
5052 ts->indirect_base);
5053 } else {
5054 reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
5055 output_pref(op, k),
5056 ts->indirect_base);
5058 break;
5060 case 2: /* second of pair */
5061 if (arg_ct->oalias) {
5062 reg = new_args[arg_ct->alias_index];
5063 } else {
5064 reg = new_args[arg_ct->pair_index] + 1;
5066 break;
5068 case 3: /* first of pair, aliasing with a second input */
5069 tcg_debug_assert(!arg_ct->newreg);
5070 reg = new_args[arg_ct->pair_index] - 1;
5071 break;
5073 default:
5074 g_assert_not_reached();
5076 tcg_regset_set_reg(o_allocated_regs, reg);
5077 set_temp_val_reg(s, ts, reg);
5078 ts->mem_coherent = 0;
5079 new_args[i] = reg;
5083 /* emit instruction */
5084 switch (op->opc) {
5085 case INDEX_op_ext8s_i32:
5086 tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
5087 break;
5088 case INDEX_op_ext8s_i64:
5089 tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
5090 break;
5091 case INDEX_op_ext8u_i32:
5092 case INDEX_op_ext8u_i64:
5093 tcg_out_ext8u(s, new_args[0], new_args[1]);
5094 break;
5095 case INDEX_op_ext16s_i32:
5096 tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
5097 break;
5098 case INDEX_op_ext16s_i64:
5099 tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
5100 break;
5101 case INDEX_op_ext16u_i32:
5102 case INDEX_op_ext16u_i64:
5103 tcg_out_ext16u(s, new_args[0], new_args[1]);
5104 break;
5105 case INDEX_op_ext32s_i64:
5106 tcg_out_ext32s(s, new_args[0], new_args[1]);
5107 break;
5108 case INDEX_op_ext32u_i64:
5109 tcg_out_ext32u(s, new_args[0], new_args[1]);
5110 break;
5111 case INDEX_op_ext_i32_i64:
5112 tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
5113 break;
5114 case INDEX_op_extu_i32_i64:
5115 tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
5116 break;
5117 case INDEX_op_extrl_i64_i32:
5118 tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
5119 break;
5120 default:
5121 if (def->flags & TCG_OPF_VECTOR) {
5122 tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
5123 new_args, const_args);
5124 } else {
5125 tcg_out_op(s, op->opc, new_args, const_args);
5127 break;
5130 /* move the outputs into the correct registers if needed */
5131 for (i = 0; i < nb_oargs; i++) {
5132 ts = arg_temp(op->args[i]);
5134 /* ENV should not be modified. */
5135 tcg_debug_assert(!temp_readonly(ts));
5137 if (NEED_SYNC_ARG(i)) {
5138 temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
5139 } else if (IS_DEAD_ARG(i)) {
5140 temp_dead(s, ts);
5145 static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
5147 const TCGLifeData arg_life = op->life;
5148 TCGTemp *ots, *itsl, *itsh;
5149 TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
5151 /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
5152 tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
5153 tcg_debug_assert(TCGOP_VECE(op) == MO_64);
5155 ots = arg_temp(op->args[0]);
5156 itsl = arg_temp(op->args[1]);
5157 itsh = arg_temp(op->args[2]);
5159 /* ENV should not be modified. */
5160 tcg_debug_assert(!temp_readonly(ots));
5162 /* Allocate the output register now. */
5163 if (ots->val_type != TEMP_VAL_REG) {
5164 TCGRegSet allocated_regs = s->reserved_regs;
5165 TCGRegSet dup_out_regs =
5166 tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
5167 TCGReg oreg;
5169 /* Make sure to not spill the input registers. */
5170 if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
5171 tcg_regset_set_reg(allocated_regs, itsl->reg);
5173 if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
5174 tcg_regset_set_reg(allocated_regs, itsh->reg);
5177 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
5178 output_pref(op, 0), ots->indirect_base);
5179 set_temp_val_reg(s, ots, oreg);
5182 /* Promote dup2 of immediates to dupi_vec. */
5183 if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
5184 uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
5185 MemOp vece = MO_64;
5187 if (val == dup_const(MO_8, val)) {
5188 vece = MO_8;
5189 } else if (val == dup_const(MO_16, val)) {
5190 vece = MO_16;
5191 } else if (val == dup_const(MO_32, val)) {
5192 vece = MO_32;
5195 tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
5196 goto done;
5199 /* If the two inputs form one 64-bit value, try dupm_vec. */
5200 if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
5201 itsh->temp_subindex == !HOST_BIG_ENDIAN &&
5202 itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
5203 TCGTemp *its = itsl - HOST_BIG_ENDIAN;
5205 temp_sync(s, its + 0, s->reserved_regs, 0, 0);
5206 temp_sync(s, its + 1, s->reserved_regs, 0, 0);
5208 if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
5209 its->mem_base->reg, its->mem_offset)) {
5210 goto done;
5214 /* Fall back to generic expansion. */
5215 return false;
5217 done:
5218 ots->mem_coherent = 0;
5219 if (IS_DEAD_ARG(1)) {
5220 temp_dead(s, itsl);
5222 if (IS_DEAD_ARG(2)) {
5223 temp_dead(s, itsh);
5225 if (NEED_SYNC_ARG(0)) {
5226 temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
5227 } else if (IS_DEAD_ARG(0)) {
5228 temp_dead(s, ots);
5230 return true;
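/*
 * Illustrative sketch, not part of tcg.c: how the deposit64 call above
 * combines the two 32-bit constant halves into a single 64-bit dupi
 * value.  ex_deposit64 restates the semantics locally for the example.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t ex_deposit64(uint64_t val, int pos, int len, uint64_t field)
{
    uint64_t mask = (~0ull >> (64 - len)) << pos;
    return (val & ~mask) | ((field << pos) & mask);
}

int main(void)
{
    /* itsl->val = 0x89abcdef (low), itsh->val = 0x01234567 (high) */
    assert(ex_deposit64(0x89abcdef, 32, 32, 0x01234567)
           == 0x0123456789abcdefull);
    return 0;
}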
5233 static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
5234 TCGRegSet allocated_regs)
5236 if (ts->val_type == TEMP_VAL_REG) {
5237 if (ts->reg != reg) {
5238 tcg_reg_free(s, reg, allocated_regs);
5239 if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
5241 * Cross register class move not supported. Sync the
5242 * temp back to its slot and load from there.
5244 temp_sync(s, ts, allocated_regs, 0, 0);
5245 tcg_out_ld(s, ts->type, reg,
5246 ts->mem_base->reg, ts->mem_offset);
5249 } else {
5250 TCGRegSet arg_set = 0;
5252 tcg_reg_free(s, reg, allocated_regs);
5253 tcg_regset_set_reg(arg_set, reg);
5254 temp_load(s, ts, arg_set, allocated_regs, 0);
5258 static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts,
5259 TCGRegSet allocated_regs)
5262 * When the destination is on the stack, load up the temp and store.
5263 * If there are many call-saved registers, the temp might live to
5264 * see another use; otherwise it'll be discarded.
5266 temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
5267 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
5268 arg_slot_stk_ofs(arg_slot));
5271 static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
5272 TCGTemp *ts, TCGRegSet *allocated_regs)
5274 if (arg_slot_reg_p(l->arg_slot)) {
5275 TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
5276 load_arg_reg(s, reg, ts, *allocated_regs);
5277 tcg_regset_set_reg(*allocated_regs, reg);
5278 } else {
5279 load_arg_stk(s, l->arg_slot, ts, *allocated_regs);
5283 static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base,
5284 intptr_t ref_off, TCGRegSet *allocated_regs)
5286 TCGReg reg;
5288 if (arg_slot_reg_p(arg_slot)) {
5289 reg = tcg_target_call_iarg_regs[arg_slot];
5290 tcg_reg_free(s, reg, *allocated_regs);
5291 tcg_out_addi_ptr(s, reg, ref_base, ref_off);
5292 tcg_regset_set_reg(*allocated_regs, reg);
5293 } else {
5294 reg = tcg_reg_alloc(s, tcg_target_available_regs[TCG_TYPE_PTR],
5295 *allocated_regs, 0, false);
5296 tcg_out_addi_ptr(s, reg, ref_base, ref_off);
5297 tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
5298 arg_slot_stk_ofs(arg_slot));
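/*
 * Illustrative sketch, not part of tcg.c: the slot-to-location rule the
 * load_arg_* helpers above implement.  On a toy ABI with six integer
 * argument registers, slots 0..5 map to registers and later slots map
 * to stack offsets.  Names and numbers are invented; the real mapping
 * is arg_slot_reg_p()/arg_slot_stk_ofs().
 */
#include <assert.h>
#include <stdbool.h>

enum { EX_NB_IARG_REGS = 6, EX_SLOT_BYTES = 8 };

static bool ex_slot_in_reg(unsigned slot)
{
    return slot < EX_NB_IARG_REGS;
}

static int ex_slot_stk_ofs(unsigned slot)
{
    return (slot - EX_NB_IARG_REGS) * EX_SLOT_BYTES;
}

int main(void)
{
    assert(ex_slot_in_reg(0) && !ex_slot_in_reg(6));
    assert(ex_slot_stk_ofs(7) == 8);    /* second stack slot */
    return 0;
}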
5302 static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
5304 const int nb_oargs = TCGOP_CALLO(op);
5305 const int nb_iargs = TCGOP_CALLI(op);
5306 const TCGLifeData arg_life = op->life;
5307 const TCGHelperInfo *info = tcg_call_info(op);
5308 TCGRegSet allocated_regs = s->reserved_regs;
5309 int i;
5312 * Move inputs into place in reverse order,
5313 * so that we place stacked arguments first.
5315 for (i = nb_iargs - 1; i >= 0; --i) {
5316 const TCGCallArgumentLoc *loc = &info->in[i];
5317 TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);
5319 switch (loc->kind) {
5320 case TCG_CALL_ARG_NORMAL:
5321 case TCG_CALL_ARG_EXTEND_U:
5322 case TCG_CALL_ARG_EXTEND_S:
5323 load_arg_normal(s, loc, ts, &allocated_regs);
5324 break;
5325 case TCG_CALL_ARG_BY_REF:
5326 load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
5327 load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
5328 arg_slot_stk_ofs(loc->ref_slot),
5329 &allocated_regs);
5330 break;
5331 case TCG_CALL_ARG_BY_REF_N:
5332 load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
5333 break;
5334 default:
5335 g_assert_not_reached();
5339 /* Mark dead temporaries and free the associated registers. */
5340 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
5341 if (IS_DEAD_ARG(i)) {
5342 temp_dead(s, arg_temp(op->args[i]));
5346 /* Clobber call registers. */
5347 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
5348 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
5349 tcg_reg_free(s, i, allocated_regs);
5354 * Save globals if they might be written by the helper,
5355 * sync them if they might be read.
5357 if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
5358 /* Nothing to do */
5359 } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
5360 sync_globals(s, allocated_regs);
5361 } else {
5362 save_globals(s, allocated_regs);
5366 * If the ABI passes a pointer to the returned struct as the first
5367 * argument, load that now. Pass a pointer to the output home slot.
5369 if (info->out_kind == TCG_CALL_RET_BY_REF) {
5370 TCGTemp *ts = arg_temp(op->args[0]);
5372 if (!ts->mem_allocated) {
5373 temp_allocate_frame(s, ts);
5375 load_arg_ref(s, 0, ts->mem_base->reg, ts->mem_offset, &allocated_regs);
5378 tcg_out_call(s, tcg_call_func(op), info);
5380 /* Assign output registers and emit moves if needed. */
5381 switch (info->out_kind) {
5382 case TCG_CALL_RET_NORMAL:
5383 for (i = 0; i < nb_oargs; i++) {
5384 TCGTemp *ts = arg_temp(op->args[i]);
5385 TCGReg reg = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, i);
5387 /* ENV should not be modified. */
5388 tcg_debug_assert(!temp_readonly(ts));
5390 set_temp_val_reg(s, ts, reg);
5391 ts->mem_coherent = 0;
5393 break;
5395 case TCG_CALL_RET_BY_VEC:
5397 TCGTemp *ts = arg_temp(op->args[0]);
5399 tcg_debug_assert(ts->base_type == TCG_TYPE_I128);
5400 tcg_debug_assert(ts->temp_subindex == 0);
5401 if (!ts->mem_allocated) {
5402 temp_allocate_frame(s, ts);
5404 tcg_out_st(s, TCG_TYPE_V128,
5405 tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
5406 ts->mem_base->reg, ts->mem_offset);
5408 /* fall through to mark all parts in memory */
5410 case TCG_CALL_RET_BY_REF:
5411 /* The callee has performed a write through the reference. */
5412 for (i = 0; i < nb_oargs; i++) {
5413 TCGTemp *ts = arg_temp(op->args[i]);
5414 ts->val_type = TEMP_VAL_MEM;
5416 break;
5418 default:
5419 g_assert_not_reached();
5422 /* Flush or discard output registers as needed. */
5423 for (i = 0; i < nb_oargs; i++) {
5424 TCGTemp *ts = arg_temp(op->args[i]);
5425 if (NEED_SYNC_ARG(i)) {
5426 temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
5427 } else if (IS_DEAD_ARG(i)) {
5428 temp_dead(s, ts);
5434 * atom_and_align_for_opc:
5435 * @s: tcg context
5436 * @opc: memory operation code
5437 * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations
5438 * @allow_two_ops: true if we are prepared to issue two operations
5440 * Return the alignment and atomicity to use for the inline fast path
5441 * for the given memory operation. The alignment may be larger than
5442 * that specified in @opc, and the correct alignment will be diagnosed
5443 * by the slow path helper.
5445 * If @allow_two_ops, the host is prepared to test for 2x alignment,
5446 * and issue two loads or stores for subalignment.
5448 static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
5449 MemOp host_atom, bool allow_two_ops)
5451 MemOp align = get_alignment_bits(opc);
5452 MemOp size = opc & MO_SIZE;
5453 MemOp half = size ? size - 1 : 0;
5454 MemOp atom = opc & MO_ATOM_MASK;
5455 MemOp atmax;
5457 switch (atom) {
5458 case MO_ATOM_NONE:
5459 /* The operation requires no specific atomicity. */
5460 atmax = MO_8;
5461 break;
5463 case MO_ATOM_IFALIGN:
5464 atmax = size;
5465 break;
5467 case MO_ATOM_IFALIGN_PAIR:
5468 atmax = half;
5469 break;
5471 case MO_ATOM_WITHIN16:
5472 atmax = size;
5473 if (size == MO_128) {
5474 /* Misalignment implies !within16, and therefore no atomicity. */
5475 } else if (host_atom != MO_ATOM_WITHIN16) {
5476 /* The host does not implement within16, so require alignment. */
5477 align = MAX(align, size);
5479 break;
5481 case MO_ATOM_WITHIN16_PAIR:
5482 atmax = size;
5484 * Misalignment implies !within16, and therefore half atomicity.
5485 * Any host prepared for two operations can implement this with
5486 * half alignment.
5488 if (host_atom != MO_ATOM_WITHIN16 && allow_two_ops) {
5489 align = MAX(align, half);
5491 break;
5493 case MO_ATOM_SUBALIGN:
5494 atmax = size;
5495 if (host_atom != MO_ATOM_SUBALIGN) {
5496 /* If unaligned but not odd, there are subobjects up to half. */
5497 if (allow_two_ops) {
5498 align = MAX(align, half);
5499 } else {
5500 align = MAX(align, size);
5503 break;
5505 default:
5506 g_assert_not_reached();
5509 return (TCGAtomAlign){ .atom = atmax, .align = align };
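/*
 * Illustrative sketch, not part of tcg.c: two consequences of the rules
 * above, with MO_* restated as log2 of the byte size.  A 16-byte access
 * split into a pair of host operations is at best 8-byte atomic, and a
 * host without MO_ATOM_WITHIN16 support falls back to requiring full
 * alignment to honour a within-16 request.
 */
#include <assert.h>

#define EX_MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    unsigned size = 4;                   /* MO_128: log2(16 bytes) */
    unsigned half = size ? size - 1 : 0; /* MO_64 */
    assert(half == 3);                   /* pair ops: 8-byte atomicity */

    unsigned align = 0;                  /* opc requested no alignment */
    align = EX_MAX(align, size);         /* host lacks within16 */
    assert(align == 4);                  /* now require 16-byte align */
    return 0;
}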
5513 * Similarly for qemu_ld/st slow path helpers.
5514 * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously,
5515 * using only the provided backend tcg_out_* functions.
5518 static int tcg_out_helper_stk_ofs(TCGType type, unsigned slot)
5520 int ofs = arg_slot_stk_ofs(slot);
5523 * Each stack slot is TCG_TARGET_LONG_BITS. If the host does not
5524 * require extension to uint64_t, adjust the address for uint32_t.
5526 if (HOST_BIG_ENDIAN &&
5527 TCG_TARGET_REG_BITS == 64 &&
5528 type == TCG_TYPE_I32) {
5529 ofs += 4;
5531 return ofs;
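/*
 * Illustrative sketch, not part of tcg.c: why a 32-bit value stored in
 * a 64-bit big-endian stack slot lives at byte offset +4, as computed
 * above.  The runtime endianness probe is local to the example.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint64_t slot = 0x00000000deadbeefull;   /* extended 32-bit value */
    int big_endian = (*(const uint8_t *)&(uint16_t){1} == 0);
    uint32_t lo;

    memcpy(&lo, (const uint8_t *)&slot + (big_endian ? 4 : 0), 4);
    assert(lo == 0xdeadbeef);                /* low half found */
    return 0;
}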
5534 static void tcg_out_helper_load_slots(TCGContext *s,
5535 unsigned nmov, TCGMovExtend *mov,
5536 const TCGLdstHelperParam *parm)
5538 unsigned i;
5539 TCGReg dst3;
5542 * Start from the end, storing to the stack first.
5543 * This frees those registers, so we need not consider overlap.
5545 for (i = nmov; i-- > 0; ) {
5546 unsigned slot = mov[i].dst;
5548 if (arg_slot_reg_p(slot)) {
5549 goto found_reg;
5552 TCGReg src = mov[i].src;
5553 TCGType dst_type = mov[i].dst_type;
5554 MemOp dst_mo = dst_type == TCG_TYPE_I32 ? MO_32 : MO_64;
5556 /* The argument is going onto the stack; extend into scratch. */
5557 if ((mov[i].src_ext & MO_SIZE) != dst_mo) {
5558 tcg_debug_assert(parm->ntmp != 0);
5559 mov[i].dst = src = parm->tmp[0];
5560 tcg_out_movext1(s, &mov[i]);
5563 tcg_out_st(s, dst_type, src, TCG_REG_CALL_STACK,
5564 tcg_out_helper_stk_ofs(dst_type, slot));
5566 return;
5568 found_reg:
5570 * The remaining arguments are in registers.
5571 * Convert slot numbers to argument registers.
5573 nmov = i + 1;
5574 for (i = 0; i < nmov; ++i) {
5575 mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst];
5578 switch (nmov) {
5579 case 4:
5580 /* The backend must have provided enough temps for the worst case. */
5581 tcg_debug_assert(parm->ntmp >= 2);
5583 dst3 = mov[3].dst;
5584 for (unsigned j = 0; j < 3; ++j) {
5585 if (dst3 == mov[j].src) {
5587 * Conflict. Copy the source to a temporary, perform the
5588 * remaining moves, then the extension from our scratch
5589 * on the way out.
5591 TCGReg scratch = parm->tmp[1];
5593 tcg_out_mov(s, mov[3].src_type, scratch, mov[3].src);
5594 tcg_out_movext3(s, mov, mov + 1, mov + 2, parm->tmp[0]);
5595 tcg_out_movext1_new_src(s, &mov[3], scratch);
5596 break;
5600 /* No conflicts: perform this move and continue. */
5601 tcg_out_movext1(s, &mov[3]);
5602 /* fall through */
5604 case 3:
5605 tcg_out_movext3(s, mov, mov + 1, mov + 2,
5606 parm->ntmp ? parm->tmp[0] : -1);
5607 break;
5608 case 2:
5609 tcg_out_movext2(s, mov, mov + 1,
5610 parm->ntmp ? parm->tmp[0] : -1);
5611 break;
5612 case 1:
5613 tcg_out_movext1(s, mov);
5614 break;
5615 default:
5616 g_assert_not_reached();
5620 static void tcg_out_helper_load_imm(TCGContext *s, unsigned slot,
5621 TCGType type, tcg_target_long imm,
5622 const TCGLdstHelperParam *parm)
5624 if (arg_slot_reg_p(slot)) {
5625 tcg_out_movi(s, type, tcg_target_call_iarg_regs[slot], imm);
5626 } else {
5627 int ofs = tcg_out_helper_stk_ofs(type, slot);
5628 if (!tcg_out_sti(s, type, imm, TCG_REG_CALL_STACK, ofs)) {
5629 tcg_debug_assert(parm->ntmp != 0);
5630 tcg_out_movi(s, type, parm->tmp[0], imm);
5631 tcg_out_st(s, type, parm->tmp[0], TCG_REG_CALL_STACK, ofs);
5636 static void tcg_out_helper_load_common_args(TCGContext *s,
5637 const TCGLabelQemuLdst *ldst,
5638 const TCGLdstHelperParam *parm,
5639 const TCGHelperInfo *info,
5640 unsigned next_arg)
5642 TCGMovExtend ptr_mov = {
5643 .dst_type = TCG_TYPE_PTR,
5644 .src_type = TCG_TYPE_PTR,
5645 .src_ext = sizeof(void *) == 4 ? MO_32 : MO_64
5647 const TCGCallArgumentLoc *loc = &info->in[0];
5648 TCGType type;
5649 unsigned slot;
5650 tcg_target_ulong imm;
5653 * Handle env, which is always first.
5655 ptr_mov.dst = loc->arg_slot;
5656 ptr_mov.src = TCG_AREG0;
5657 tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);
5660 * Handle oi.
5662 imm = ldst->oi;
5663 loc = &info->in[next_arg];
5664 type = TCG_TYPE_I32;
5665 switch (loc->kind) {
5666 case TCG_CALL_ARG_NORMAL:
5667 break;
5668 case TCG_CALL_ARG_EXTEND_U:
5669 case TCG_CALL_ARG_EXTEND_S:
5670 /* No extension required for MemOpIdx. */
5671 tcg_debug_assert(imm <= INT32_MAX);
5672 type = TCG_TYPE_REG;
5673 break;
5674 default:
5675 g_assert_not_reached();
5677 tcg_out_helper_load_imm(s, loc->arg_slot, type, imm, parm);
5678 next_arg++;
5681 * Handle ra.
5683 loc = &info->in[next_arg];
5684 slot = loc->arg_slot;
5685 if (parm->ra_gen) {
5686 int arg_reg = -1;
5687 TCGReg ra_reg;
5689 if (arg_slot_reg_p(slot)) {
5690 arg_reg = tcg_target_call_iarg_regs[slot];
5692 ra_reg = parm->ra_gen(s, ldst, arg_reg);
5694 ptr_mov.dst = slot;
5695 ptr_mov.src = ra_reg;
5696 tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);
5697 } else {
5698 imm = (uintptr_t)ldst->raddr;
5699 tcg_out_helper_load_imm(s, slot, TCG_TYPE_PTR, imm, parm);
5703 static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov,
5704 const TCGCallArgumentLoc *loc,
5705 TCGType dst_type, TCGType src_type,
5706 TCGReg lo, TCGReg hi)
5708 MemOp reg_mo;
5710 if (dst_type <= TCG_TYPE_REG) {
5711 MemOp src_ext;
5713 switch (loc->kind) {
5714 case TCG_CALL_ARG_NORMAL:
5715 src_ext = src_type == TCG_TYPE_I32 ? MO_32 : MO_64;
5716 break;
5717 case TCG_CALL_ARG_EXTEND_U:
5718 dst_type = TCG_TYPE_REG;
5719 src_ext = MO_UL;
5720 break;
5721 case TCG_CALL_ARG_EXTEND_S:
5722 dst_type = TCG_TYPE_REG;
5723 src_ext = MO_SL;
5724 break;
5725 default:
5726 g_assert_not_reached();
5729 mov[0].dst = loc->arg_slot;
5730 mov[0].dst_type = dst_type;
5731 mov[0].src = lo;
5732 mov[0].src_type = src_type;
5733 mov[0].src_ext = src_ext;
5734 return 1;
5737 if (TCG_TARGET_REG_BITS == 32) {
5738 assert(dst_type == TCG_TYPE_I64);
5739 reg_mo = MO_32;
5740 } else {
5741 assert(dst_type == TCG_TYPE_I128);
5742 reg_mo = MO_64;
5745 mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot;
5746 mov[0].src = lo;
5747 mov[0].dst_type = TCG_TYPE_REG;
5748 mov[0].src_type = TCG_TYPE_REG;
5749 mov[0].src_ext = reg_mo;
5751 mov[1].dst = loc[!HOST_BIG_ENDIAN].arg_slot;
5752 mov[1].src = hi;
5753 mov[1].dst_type = TCG_TYPE_REG;
5754 mov[1].src_type = TCG_TYPE_REG;
5755 mov[1].src_ext = reg_mo;
5757 return 2;
5760 static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
5761 const TCGLdstHelperParam *parm)
5763 const TCGHelperInfo *info;
5764 const TCGCallArgumentLoc *loc;
5765 TCGMovExtend mov[2];
5766 unsigned next_arg, nmov;
5767 MemOp mop = get_memop(ldst->oi);
5769 switch (mop & MO_SIZE) {
5770 case MO_8:
5771 case MO_16:
5772 case MO_32:
5773 info = &info_helper_ld32_mmu;
5774 break;
5775 case MO_64:
5776 info = &info_helper_ld64_mmu;
5777 break;
5778 case MO_128:
5779 info = &info_helper_ld128_mmu;
5780 break;
5781 default:
5782 g_assert_not_reached();
5785 /* Defer env argument. */
5786 next_arg = 1;
5788 loc = &info->in[next_arg];
5789 if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
5791 * 32-bit host with 32-bit guest: zero-extend the guest address
5792 * to 64-bits for the helper by storing the low part, then
5793 * load a zero for the high part.
5795 tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
5796 TCG_TYPE_I32, TCG_TYPE_I32,
5797 ldst->addrlo_reg, -1);
5798 tcg_out_helper_load_slots(s, 1, mov, parm);
5800 tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot,
5801 TCG_TYPE_I32, 0, parm);
5802 next_arg += 2;
5803 } else {
5804 nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
5805 ldst->addrlo_reg, ldst->addrhi_reg);
5806 tcg_out_helper_load_slots(s, nmov, mov, parm);
5807 next_arg += nmov;
5810 switch (info->out_kind) {
5811 case TCG_CALL_RET_NORMAL:
5812 case TCG_CALL_RET_BY_VEC:
5813 break;
5814 case TCG_CALL_RET_BY_REF:
5816 * The return reference is in the first argument slot.
5817 * We need memory in which to return: re-use the top of stack.
5820 int ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
5822 if (arg_slot_reg_p(0)) {
5823 tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[0],
5824 TCG_REG_CALL_STACK, ofs_slot0);
5825 } else {
5826 tcg_debug_assert(parm->ntmp != 0);
5827 tcg_out_addi_ptr(s, parm->tmp[0],
5828 TCG_REG_CALL_STACK, ofs_slot0);
5829 tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
5830 TCG_REG_CALL_STACK, ofs_slot0);
5833 break;
5834 default:
5835 g_assert_not_reached();
5838 tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
5841 static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst,
5842 bool load_sign,
5843 const TCGLdstHelperParam *parm)
5845 MemOp mop = get_memop(ldst->oi);
5846 TCGMovExtend mov[2];
5847 int ofs_slot0;
5849 switch (ldst->type) {
5850 case TCG_TYPE_I64:
5851 if (TCG_TARGET_REG_BITS == 32) {
5852 break;
5854 /* fall through */
5856 case TCG_TYPE_I32:
5857 mov[0].dst = ldst->datalo_reg;
5858 mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0);
5859 mov[0].dst_type = ldst->type;
5860 mov[0].src_type = TCG_TYPE_REG;
5863 * If load_sign, then we allowed the helper to perform the
5864 * appropriate sign extension to tcg_target_ulong, and all
5865 * we need now is a plain move.
5867 * If not, then we expect the relevant extension
5868 * instruction to be no more expensive than a move, and
5869 * we thus save the icache etc by only using one of two
5870 * helper functions.
5872 if (load_sign || !(mop & MO_SIGN)) {
5873 if (TCG_TARGET_REG_BITS == 32 || ldst->type == TCG_TYPE_I32) {
5874 mov[0].src_ext = MO_32;
5875 } else {
5876 mov[0].src_ext = MO_64;
5878 } else {
5879 mov[0].src_ext = mop & MO_SSIZE;
5881 tcg_out_movext1(s, mov);
5882 return;
5884 case TCG_TYPE_I128:
5885 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
5886 ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
5887 switch (TCG_TARGET_CALL_RET_I128) {
5888 case TCG_CALL_RET_NORMAL:
5889 break;
5890 case TCG_CALL_RET_BY_VEC:
5891 tcg_out_st(s, TCG_TYPE_V128,
5892 tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
5893 TCG_REG_CALL_STACK, ofs_slot0);
5894 /* fall through */
5895 case TCG_CALL_RET_BY_REF:
5896 tcg_out_ld(s, TCG_TYPE_I64, ldst->datalo_reg,
5897 TCG_REG_CALL_STACK, ofs_slot0 + 8 * HOST_BIG_ENDIAN);
5898 tcg_out_ld(s, TCG_TYPE_I64, ldst->datahi_reg,
5899 TCG_REG_CALL_STACK, ofs_slot0 + 8 * !HOST_BIG_ENDIAN);
5900 return;
5901 default:
5902 g_assert_not_reached();
5904 break;
5906 default:
5907 g_assert_not_reached();
5910 mov[0].dst = ldst->datalo_reg;
5911 mov[0].src =
5912 tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN);
5913 mov[0].dst_type = TCG_TYPE_REG;
5914 mov[0].src_type = TCG_TYPE_REG;
5915 mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;
5917 mov[1].dst = ldst->datahi_reg;
5918 mov[1].src =
5919 tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN);
5920 mov[1].dst_type = TCG_TYPE_REG;
5921 mov[1].src_type = TCG_TYPE_REG;
5922 mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;
5924 tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1);
5927 static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
5928 const TCGLdstHelperParam *parm)
5930 const TCGHelperInfo *info;
5931 const TCGCallArgumentLoc *loc;
5932 TCGMovExtend mov[4];
5933 TCGType data_type;
5934 unsigned next_arg, nmov, n;
5935 MemOp mop = get_memop(ldst->oi);
5937 switch (mop & MO_SIZE) {
5938 case MO_8:
5939 case MO_16:
5940 case MO_32:
5941 info = &info_helper_st32_mmu;
5942 data_type = TCG_TYPE_I32;
5943 break;
5944 case MO_64:
5945 info = &info_helper_st64_mmu;
5946 data_type = TCG_TYPE_I64;
5947 break;
5948 case MO_128:
5949 info = &info_helper_st128_mmu;
5950 data_type = TCG_TYPE_I128;
5951 break;
5952 default:
5953 g_assert_not_reached();
5956 /* Defer env argument. */
5957 next_arg = 1;
5958 nmov = 0;
5960 /* Handle addr argument. */
5961 loc = &info->in[next_arg];
5962 if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
5964 * 32-bit host with 32-bit guest: zero-extend the guest address
5965 * to 64-bits for the helper by storing the low part. Later,
5966 * after we have processed the register inputs, we will load a
5967 * zero for the high part.
5969 tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
5970 TCG_TYPE_I32, TCG_TYPE_I32,
5971 ldst->addrlo_reg, -1);
5972 next_arg += 2;
5973 nmov += 1;
5974 } else {
5975 n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
5976 ldst->addrlo_reg, ldst->addrhi_reg);
5977 next_arg += n;
5978 nmov += n;
5981 /* Handle data argument. */
5982 loc = &info->in[next_arg];
5983 switch (loc->kind) {
5984 case TCG_CALL_ARG_NORMAL:
5985 case TCG_CALL_ARG_EXTEND_U:
5986 case TCG_CALL_ARG_EXTEND_S:
5987 n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type,
5988 ldst->datalo_reg, ldst->datahi_reg);
5989 next_arg += n;
5990 nmov += n;
5991 tcg_out_helper_load_slots(s, nmov, mov, parm);
5992 break;
5994 case TCG_CALL_ARG_BY_REF:
5995 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
5996 tcg_debug_assert(data_type == TCG_TYPE_I128);
5997 tcg_out_st(s, TCG_TYPE_I64,
5998 HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg,
5999 TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[0].ref_slot));
6000 tcg_out_st(s, TCG_TYPE_I64,
6001 HOST_BIG_ENDIAN ? ldst->datalo_reg : ldst->datahi_reg,
6002 TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[1].ref_slot));
6004 tcg_out_helper_load_slots(s, nmov, mov, parm);
6006 if (arg_slot_reg_p(loc->arg_slot)) {
6007 tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[loc->arg_slot],
6008 TCG_REG_CALL_STACK,
6009 arg_slot_stk_ofs(loc->ref_slot));
6010 } else {
6011 tcg_debug_assert(parm->ntmp != 0);
6012 tcg_out_addi_ptr(s, parm->tmp[0], TCG_REG_CALL_STACK,
6013 arg_slot_stk_ofs(loc->ref_slot));
6014 tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
6015 TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc->arg_slot));
6017 next_arg += 2;
6018 break;
6020 default:
6021 g_assert_not_reached();
6024 if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
6025 /* Zero extend the address by loading a zero for the high part. */
6026 loc = &info->in[1 + !HOST_BIG_ENDIAN];
6027 tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm);
6030 tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
6033 int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
6035 int i, start_words, num_insns;
6036 TCGOp *op;
6038 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
6039 && qemu_log_in_addr_range(pc_start))) {
6040 FILE *logfile = qemu_log_trylock();
6041 if (logfile) {
6042 fprintf(logfile, "OP:\n");
6043 tcg_dump_ops(s, logfile, false);
6044 fprintf(logfile, "\n");
6045 qemu_log_unlock(logfile);
6049 #ifdef CONFIG_DEBUG_TCG
6050 /* Ensure all labels referenced have been emitted. */
6052 TCGLabel *l;
6053 bool error = false;
6055 QSIMPLEQ_FOREACH(l, &s->labels, next) {
6056 if (unlikely(!l->present) && !QSIMPLEQ_EMPTY(&l->branches)) {
6057 qemu_log_mask(CPU_LOG_TB_OP,
6058 "$L%d referenced but not present.\n", l->id);
6059 error = true;
6062 assert(!error);
6064 #endif
6066 tcg_optimize(s);
6068 reachable_code_pass(s);
6069 liveness_pass_0(s);
6070 liveness_pass_1(s);
6072 if (s->nb_indirects > 0) {
6073 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
6074 && qemu_log_in_addr_range(pc_start))) {
6075 FILE *logfile = qemu_log_trylock();
6076 if (logfile) {
6077 fprintf(logfile, "OP before indirect lowering:\n");
6078 tcg_dump_ops(s, logfile, false);
6079 fprintf(logfile, "\n");
6080 qemu_log_unlock(logfile);
6084 /* Replace indirect temps with direct temps. */
6085 if (liveness_pass_2(s)) {
6086 /* If changes were made, re-run liveness. */
6087 liveness_pass_1(s);
6091 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
6092 && qemu_log_in_addr_range(pc_start))) {
6093 FILE *logfile = qemu_log_trylock();
6094 if (logfile) {
6095 fprintf(logfile, "OP after optimization and liveness analysis:\n");
6096 tcg_dump_ops(s, logfile, true);
6097 fprintf(logfile, "\n");
6098 qemu_log_unlock(logfile);
6102 /* Initialize goto_tb jump offsets. */
6103 tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
6104 tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
6105 tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
6106 tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;
6108 tcg_reg_alloc_start(s);
6111 * Reset the buffer pointers when restarting after overflow.
6112 * TODO: Move this into translate-all.c with the rest of the
6113 * buffer management. Having only this done here is confusing.
6115 s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
6116 s->code_ptr = s->code_buf;
6118 #ifdef TCG_TARGET_NEED_LDST_LABELS
6119 QSIMPLEQ_INIT(&s->ldst_labels);
6120 #endif
6121 #ifdef TCG_TARGET_NEED_POOL_LABELS
6122 s->pool_labels = NULL;
6123 #endif
6125 start_words = s->insn_start_words;
6126 s->gen_insn_data =
6127 tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);

    tcg_out_tb_start(s);

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_dup_vec:
            tcg_reg_alloc_dup(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset. */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < start_words; ++i) {
                s->gen_insn_data[num_insns * start_words + i] =
                    tcg_get_insn_start_param(op, i);
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]));
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        case INDEX_op_exit_tb:
            tcg_out_exit_tb(s, op->args[0]);
            break;
        case INDEX_op_goto_tb:
            tcg_out_goto_tb(s, op->args[0]);
            break;
        case INDEX_op_dup2_vec:
            if (tcg_reg_alloc_dup2(s, op)) {
                break;
            }
            /* fall through */
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns. */
            tcg_reg_alloc_op(s, op);
            break;
        }
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation. */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off. */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns + 1 == s->gen_tb->icount);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block. */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

#ifndef CONFIG_TCG_INTERPRETER
    /* flush instruction cache */
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf,
                        tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
#endif

    return tcg_current_code_size(s);
}
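
/*
 * A hypothetical caller's view of the return value (sketch only; the
 * real retry logic lives with the translation-block management code,
 * not in this file):
 *
 *     int gen_size = tcg_gen_code(s, tb, pc_start);
 *     if (gen_size == -1) {
 *         // Code buffer high-water mark hit: flush the code buffer
 *         // and restart generation of this TB from scratch.
 *     } else if (gen_size == -2) {
 *         // Generated code too large for the 16-bit insn-end offsets,
 *         // or a relocation could not be resolved: retry with fewer
 *         // guest instructions in the TB.
 *     } else {
 *         // Success: gen_size is the number of bytes emitted.
 *     }
 */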

#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS. */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface. */
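
/*
 * How the debugger side consumes this (a sketch, per the GDB JIT
 * interface documentation): the debugger plants an internal breakpoint
 * on __jit_debug_register_code() and, when it fires, reads the
 * descriptor to locate the newly registered symbol file:
 *
 *     struct jit_descriptor *d = &__jit_debug_descriptor;
 *     if (d->action_flag == JIT_REGISTER_FN) {
 *         struct jit_code_entry *e = d->relevant_entry;
 *         // parse [e->symfile_addr, e->symfile_addr + e->symfile_size)
 *         // as an in-memory ELF image -- the one built below.
 *     }
 */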

static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
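
/*
 * Example (sketch): given the string table built below,
 *
 *     str = "\0" ".text\0" ".debug_info\0" ...
 *
 * find_string(str, ".text") returns 1 and find_string(str, ".debug_info")
 * returns 7 -- byte offsets into the table, which is exactly what the ELF
 * sh_name and st_name fields expect.  Note the table must contain the
 * string: the loop above has no terminating sentinel check.
 */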

static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            /* e_ehsize is the size of the ELF header itself. */
            .e_ehsize = sizeof(ElfW(Ehdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            },
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            },
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
        FILE *f = fopen(jit, "w+b");
        if (f) {
            /* fwrite returns the number of complete items written, so a
               successful single-item write yields 1, not img_size. */
            if (fwrite(img, img_size, 1, f) != 1) {
                /* Best effort only; a failed dump is not fatal. */
            }
            fclose(f);
        }
    }
#endif
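
    /*
     * With DEBUG_JIT defined, the dumped image can then be inspected
     * with the standard binutils tools, e.g. (path per the
     * g_get_tmp_dir() call above, typically /tmp):
     *
     *     readelf -S /tmp/qemu.jit               # section headers
     *     readelf --debug-dump=frames /tmp/qemu.jit
     *     objdump -h /tmp/qemu.jit
     */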

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(const void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif