/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/cacheflush.h"
/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS

#include "exec/exec-all.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif

#include "tcg/tcg-op.h"
#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif
/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrame;
static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif
#define TCG_HIGHWATER 1024

static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif
struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n; /* number of regions */
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif
#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
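
/*
 * A note on the emission helpers above: on a host whose instruction unit
 * matches the value being written (e.g. TCG_TARGET_INSN_UNIT_SIZE == 4
 * for tcg_out32), the store is a single direct unit write; otherwise the
 * memcpy path is taken and code_ptr advances by bytes / unit size, so
 * e.g. tcg_out64 on a unit-size-4 host advances by two units.
 */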
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}
static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}
TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}
static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
}
/* Signal overflow, starting over with fewer guest insns. */
static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}
#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F
/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_N1_I2
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_N1_I2
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
/* Expand the enumerator to be returned from tcg_target_op_def().  */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
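
/*
 * Putting the three expansions together: a line such as C_O1_I2(r, r, ri)
 * in tcg-target-con-set.h becomes (1) the enumerator c_o1_i2_r_r_ri,
 * (2) the constraint_sets[] entry { .args_ct_str = { "r", "r", "ri" } },
 * and (3) the value a backend's tcg_target_op_def() returns for an op
 * with one register output and two inputs, the second possibly constant.
 * (The "r"/"ri" letters are illustrative; the actual sets depend on the
 * host backend's constraint letters.)
 */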
#include "tcg-target.c.inc"
/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}
static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have one of the two .size fields set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
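
/*
 * Worked example: for an inserted TB with .ptr == P and .size == S, a
 * lookup key { .ptr = P + 4, .size = 0 } compares equal to it through
 * ptr_cmp_tb_tc() because P <= P + 4 < P + S, so g_tree_lookup() finds
 * the TB covering that host-code address.
 */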
static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}
static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert. The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}
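
/*
 * E.g. with start_aligned == A, stride == 4 MiB and n == 8 regions, a
 * pointer at A + 9 MiB resolves to region_idx 9 / 4 == 2; offsets past
 * stride * (n - 1) are clamped to the last region so that the extra
 * tail pages assigned to it still resolve correctly.
 */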
void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}
void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}
/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}
static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}
size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}
static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
{
    TranslationBlock *tb = v;

    tb_destroy(tb);
    return FALSE;
}
static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}
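
/*
 * Layout sketch: regions sit stride bytes apart, with stride == size +
 * guard page. On top of that, region 0 absorbs the unaligned bytes at
 * the start of the buffer and the last region absorbs the page-aligned
 * tail, which is why both ends are special-cased above.
 */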
static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}
static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}
/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}
/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}
/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        bool err = tcg_region_initial_alloc__locked(s);

        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}
#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
#if !defined(CONFIG_USER_ONLY)
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
#endif
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
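
/*
 * Worked example: with a 1 GiB code_gen_buffer and max_cpus == 8, the
 * first loop iteration tries 8 regions per vCPU: 1 GiB / 64 == 16 MiB
 * per region, which passes the 2 MiB check, so 64 regions are used.
 * With a 64 MiB buffer the loop instead settles on 4 regions per vCPU,
 * i.e. 32 regions of 2 MiB each.
 */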
/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur. Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     */
    for (i = 0; i < region.n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);

        /*
         * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
         * rejects a permission change from RWX -> NONE. Guard pages are
         * nice for bug detection but are not essential; ignore any failure.
         */
        (void)qemu_mprotect_none(end, page_size);
    }

    tcg_region_trees_init();

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}
#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
    /* Pass NULL pointers unchanged. */
    if (rw) {
        g_assert(in_code_gen_buffer(rw));
        rw += tcg_splitwx_diff;
    }
    return rw;
}

void *tcg_splitwx_to_rw(const void *rx)
{
    /* Pass NULL pointers unchanged. */
    if (rx) {
        rx -= tcg_splitwx_diff;
        /* Assert that we end with a pointer in the rw region. */
        g_assert(in_code_gen_buffer(rx));
    }
    return (void *)rx;
}
#endif /* CONFIG_DEBUG_TCG */
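
/*
 * The arithmetic in the helpers above encodes the split-wx scheme: one
 * backing buffer is mapped twice, writable at some address and
 * executable at that address plus tcg_splitwx_diff. Converting an
 * executable (rx) pc back to its writable (rw) alias is therefore a
 * single subtraction, as tc_ptr_to_region_tree() also does, minus the
 * assert, since its pc may come from a signal handler.
 */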
static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < ms->smp.max_cpus);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
    }

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */
/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}
/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
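
/*
 * E.g. with 8 regions and one guard page per region (guard_size ==
 * stride - size), the capacity is the whole [start, end + guard) span
 * minus, per region, that guard page and the TCG_HIGHWATER slack kept
 * below code_gen_highwater by tcg_region_assign().
 */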
size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);

        total += qatomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
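
/*
 * Usage sketch: translation-lifetime allocations go through the inline
 * tcg_malloc(), which bumps pool_cur against pool_end and only falls
 * back to tcg_malloc_internal() on overflow; everything is released in
 * bulk by tcg_pool_reset() below at the start of the next translation.
 */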
void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;

    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;

    /*
     * The region trees are not yet configured, but tcg_splitwx_to_rx
     * needs the bounds for an assert.
     */
    region.start = buf0;
    region.end = buf0 + total_size;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(buf0);
#endif

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    buf1 = s->code_ptr;
#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(buf0), (uintptr_t)buf0,
                        tcg_ptr_byte_diff(buf1, buf0));
#endif

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(tcg_splitwx_to_rx(s->code_gen_buffer), total_size);

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }
        qemu_log("\n");
        qemu_log_unlock(logfile);
    }

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(tcg_code_gen_epilogue != NULL);
    }
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}
static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}
static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
*tcg_global_mem_new_internal(TCGType type
, TCGv_ptr base
,
1384 intptr_t offset
, const char *name
)
1386 TCGContext
*s
= tcg_ctx
;
1387 TCGTemp
*base_ts
= tcgv_ptr_temp(base
);
1388 TCGTemp
*ts
= tcg_global_alloc(s
);
1389 int indirect_reg
= 0, bigendian
= 0;
1390 #ifdef HOST_WORDS_BIGENDIAN
1394 switch (base_ts
->kind
) {
1398 /* We do not support double-indirect registers. */
1399 tcg_debug_assert(!base_ts
->indirect_reg
);
1400 base_ts
->indirect_base
= 1;
1401 s
->nb_indirects
+= (TCG_TARGET_REG_BITS
== 32 && type
== TCG_TYPE_I64
1406 g_assert_not_reached();
1409 if (TCG_TARGET_REG_BITS
== 32 && type
== TCG_TYPE_I64
) {
1410 TCGTemp
*ts2
= tcg_global_alloc(s
);
1413 ts
->base_type
= TCG_TYPE_I64
;
1414 ts
->type
= TCG_TYPE_I32
;
1415 ts
->indirect_reg
= indirect_reg
;
1416 ts
->mem_allocated
= 1;
1417 ts
->mem_base
= base_ts
;
1418 ts
->mem_offset
= offset
+ bigendian
* 4;
1419 pstrcpy(buf
, sizeof(buf
), name
);
1420 pstrcat(buf
, sizeof(buf
), "_0");
1421 ts
->name
= strdup(buf
);
1423 tcg_debug_assert(ts2
== ts
+ 1);
1424 ts2
->base_type
= TCG_TYPE_I64
;
1425 ts2
->type
= TCG_TYPE_I32
;
1426 ts2
->indirect_reg
= indirect_reg
;
1427 ts2
->mem_allocated
= 1;
1428 ts2
->mem_base
= base_ts
;
1429 ts2
->mem_offset
= offset
+ (1 - bigendian
) * 4;
1430 pstrcpy(buf
, sizeof(buf
), name
);
1431 pstrcat(buf
, sizeof(buf
), "_1");
1432 ts2
->name
= strdup(buf
);
1434 ts
->base_type
= type
;
1436 ts
->indirect_reg
= indirect_reg
;
1437 ts
->mem_allocated
= 1;
1438 ts
->mem_base
= base_ts
;
1439 ts
->mem_offset
= offset
;
1445 TCGTemp
*tcg_temp_new_internal(TCGType type
, bool temp_local
)
1447 TCGContext
*s
= tcg_ctx
;
1448 TCGTempKind kind
= temp_local
? TEMP_LOCAL
: TEMP_NORMAL
;
1452 k
= type
+ (temp_local
? TCG_TYPE_COUNT
: 0);
1453 idx
= find_first_bit(s
->free_temps
[k
].l
, TCG_MAX_TEMPS
);
1454 if (idx
< TCG_MAX_TEMPS
) {
1455 /* There is already an available temp with the right type. */
1456 clear_bit(idx
, s
->free_temps
[k
].l
);
1458 ts
= &s
->temps
[idx
];
1459 ts
->temp_allocated
= 1;
1460 tcg_debug_assert(ts
->base_type
== type
);
1461 tcg_debug_assert(ts
->kind
== kind
);
1463 ts
= tcg_temp_alloc(s
);
1464 if (TCG_TARGET_REG_BITS
== 32 && type
== TCG_TYPE_I64
) {
1465 TCGTemp
*ts2
= tcg_temp_alloc(s
);
1467 ts
->base_type
= type
;
1468 ts
->type
= TCG_TYPE_I32
;
1469 ts
->temp_allocated
= 1;
1472 tcg_debug_assert(ts2
== ts
+ 1);
1473 ts2
->base_type
= TCG_TYPE_I64
;
1474 ts2
->type
= TCG_TYPE_I32
;
1475 ts2
->temp_allocated
= 1;
1478 ts
->base_type
= type
;
1480 ts
->temp_allocated
= 1;
1485 #if defined(CONFIG_DEBUG_TCG)
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}
/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

    /* In order to simplify users of tcg_constant_*, silently ignore free. */
    if (ts->kind == TEMP_CONST) {
        return;
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->kind < TEMP_GLOBAL);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
    set_bit(idx, s->free_temps[k].l);
}
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works. Actual uses will
             * truncate the value to the low part.
             */
            ts->val = val;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->val = val >> 32;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
        }
        g_hash_table_insert(h, &ts->val, ts);
    }

    return ts;
}
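
/*
 * Note the hash table is keyed on &ts->val rather than a copy, which is
 * why a 64-bit constant keeps its full value in the low-part temp even
 * when split for a 32-bit host. The result is interned: requesting the
 * same (type, value) pair again, e.g. via tcg_constant_i32(), returns
 * the same TCGTemp.
 */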
TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}
TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_qemu_st8_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
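
/*
 * Generic expansion code consults this predicate (directly or through
 * the corresponding TCG_TARGET_HAS_* macros) before emitting an op,
 * e.g. replacing an unsupported ctpop with an equivalent op sequence,
 * so the opcode stream handed to the backend only ever contains ops
 * for which this returns true.
 */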
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#ifdef CONFIG_PLUGIN
    /* detect non-plugin helpers */
    if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) {
        tcg_ctx->plugin_insn->calls_helpers = true;
    }
#endif

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
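
/*
 * The sizemask encoding used throughout tcg_gen_callN() packs two bits
 * per value, shifted by (i+1)*2 so that bit 0 describes the return
 * value: bit (i+1)*2 set means argument i is 64-bit, and bit (i+1)*2 + 1
 * means it is signed. E.g. a helper returning an i64 and taking one
 * i64 argument has sizemask 0b0101.
 */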
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;

    for (i = 0, n = s->nb_temps; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        TCGTempVal val = TEMP_VAL_MEM;

        switch (ts->kind) {
        case TEMP_CONST:
            val = TEMP_VAL_CONST;
            break;
        case TEMP_FIXED:
            val = TEMP_VAL_REG;
            break;
        case TEMP_GLOBAL:
            break;
        case TEMP_NORMAL:
            val = TEMP_VAL_DEAD;
            /* fall through */
        case TEMP_LOCAL:
            ts->mem_allocated = 0;
            break;
        default:
            g_assert_not_reached();
        }
        ts->val_type = val;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    switch (ts->kind) {
    case TEMP_FIXED:
    case TEMP_GLOBAL:
        pstrcpy(buf, buf_size, ts->name);
        break;
    case TEMP_LOCAL:
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
        break;
    case TEMP_NORMAL:
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
        break;
    case TEMP_CONST:
        switch (ts->type) {
        case TCG_TYPE_I32:
            snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
            break;
#if TCG_TARGET_REG_BITS > 32
        case TCG_TYPE_I64:
            snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
            break;
#endif
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            snprintf(buf, buf_size, "v%d$0x%" PRIx64,
                     64 << (ts->type - TCG_TYPE_V64), ts->val);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    }
    return buf;
}
static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
/* Find helper name.  */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (helper_table) {
        TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};
static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};
static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef TARGET_ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}
static void tcg_dump_ops(TCGContext *s, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += qemu_log("\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
            case INDEX_op_cmpsel_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st8_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    MemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            QemuLogFile *logfile;

            rcu_read_lock();
            logfile = qatomic_rcu_read(&qemu_logfile);
            if (logfile) {
                for (; col < 40; ++col) {
                    putc(' ', logfile->fd);
                }
            }
            rcu_read_unlock();
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = op->output_pref[i];

                if (i == 0) {
                    qemu_log("  pref=");
                } else {
                    qemu_log(",");
                }
                if (set == 0) {
                    qemu_log("none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    qemu_log("all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    qemu_log("%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    qemu_log("%#x", (uint32_t)set);
                } else {
                    qemu_log("%#" PRIx64, (uint64_t)set);
                }
            }
        }

        qemu_log("\n");
    }
}
/* we give more priority to constraints with less registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct = &def->args_ct[k];
    int n;

    if (arg_ct->oalias) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        n = ctpop64(arg_ct->regs);
    }
    return TCG_TARGET_NB_REGS - n + 1;
}
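
/*
 * Example: with TCG_TARGET_NB_REGS == 32, an argument restricted to a
 * single register gets priority 32, while one that accepts all 32
 * registers gets priority 1, so the most constrained arguments are
 * allocated first and cannot be starved by less picky ones.
 */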
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j;
    TCGArgConstraint *a = def->args_ct;

    for (i = 0; i < n; i++) {
        a[start + i].sort_index = start + i;
    }
    if (n <= 1) {
        return;
    }
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            int p1 = get_constraint_priority(def, a[start + i].sort_index);
            int p2 = get_constraint_priority(def, a[start + j].sort_index);
            if (p1 < p2) {
                int tmp = a[start + i].sort_index;
                a[start + i].sort_index = a[start + j].sort_index;
                a[start + j].sort_index = tmp;
            }
        }
    }
}
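
/*
 * Populate tcg_op_defs[].args_ct for every opcode from the target's
 * constraint strings: digits alias an input to an output, '&' requests
 * a fresh output register, 'i' admits immediates, and the letters from
 * tcg-target-con-str.h add target register sets or constant ranges.
 */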
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        /*
         * Macro magic should make it impossible, but double-check that
         * the array index is in range.  Since the signedness of an enum
         * is implementation defined, force the result to unsigned.
         */
        unsigned con_set = tcg_target_op_def(op);
        tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
        tdefs = &constraint_sets[con_set];

        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].regs != 0);
                        def->args_ct[i] = def->args_ct[oarg];
                        /* The output sets oalias.  */
                        def->args_ct[oarg].oalias = true;
                        def->args_ct[oarg].alias_index = i;
                        /* The input sets ialias. */
                        def->args_ct[i].ialias = true;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    def->args_ct[i].newreg = true;
                    ct_str++;
                    break;
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;

                /* Include all of the target-specific constraints. */

#undef CONST
#define CONST(CASE, MASK) \
    case CASE: def->args_ct[i].ct |= MASK; ct_str++; break;
#define REGS(CASE, MASK) \
    case CASE: def->args_ct[i].regs |= MASK; ct_str++; break;

#include "tcg-target-con-str.h"

#undef REGS
#undef CONST
                default:
                    /* Typo in TCGTargetOpDef constraint. */
                    g_assert_not_reached();
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    TCGLabel *label;

    switch (op->opc) {
    case INDEX_op_br:
        label = arg_label(op->args[0]);
        label->refs--;
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        label = arg_label(op->args[3]);
        label->refs--;
        break;
    case INDEX_op_brcond2_i32:
        label = arg_label(op->args[5]);
        label->refs--;
        break;
    default:
        break;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;

#ifdef CONFIG_PROFILER
    qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}
static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op;

    if (likely(QTAILQ_EMPTY(&s->free_ops))) {
        op = tcg_malloc(sizeof(TCGOp));
    } else {
        op = QTAILQ_FIRST(&s->free_ops);
        QTAILQ_REMOVE(&s->free_ops, op, link);
    }
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    s->nb_ops++;

    return op;
}
TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
/* Reachable analysis: remove unreachable code.  */
static void reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;
        int call_flags;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);
            if (label->refs == 0) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that will be, and there is
                 * little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again.  */
                dead = false;
                remove = false;

                /*
                 * Optimization can fold conditional branches to unconditional.
                 * If we find a label with one reference which is preceded by
                 * an unconditional branch to it, remove both.  This needed to
                 * wait until the dead code in between them was removed.
                 */
                if (label->refs == 1) {
                    TCGOp *op_prev = QTAILQ_PREV(op, link);
                    if (op_prev->opc == INDEX_op_br &&
                        label == arg_label(op_prev->args[0])) {
                        tcg_op_remove(s, op_prev);
                        remove = true;
                    }
                }
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead.  */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions.  */
            call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1];
            if (call_flags & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind.  */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}
#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
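
/*
 * Each op's life field packs per-argument liveness: the low bits are
 * SYNC_ARG flags for (at most two) outputs that must be written back to
 * memory, and from DEAD_ARG upwards one bit per argument that dies at
 * this op.  E.g. an op whose output 0 is both synced and dead has
 * (SYNC_ARG << 0) | (DEAD_ARG << 0) set in arg_life.
 */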
/* For liveness_pass_1, the register preferences for a given temp.  */
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
{
    return ts->state_ptr;
}
/* For liveness_pass_1, reset the preferences for a given temp to the
 * maximal regset for its type.
 */
static inline void la_reset_pref(TCGTemp *ts)
{
    *la_temp_pref(ts)
        = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
}
/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void la_func_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void la_bb_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_FIXED:
        case TEMP_GLOBAL:
        case TEMP_LOCAL:
            state = TS_DEAD | TS_MEM;
            break;
        case TEMP_NORMAL:
        case TEMP_CONST:
            state = TS_DEAD;
            break;
        default:
            g_assert_not_reached();
        }
        ts->state = state;
        la_reset_pref(ts);
    }
}
/* liveness analysis: sync globals back to memory.  */
static void la_global_sync(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; ++i) {
        int state = s->temps[i].state;
        s->temps[i].state = state | TS_MEM;
        if (state == TS_DEAD) {
            /* If the global was previously dead, reset prefs.  */
            la_reset_pref(&s->temps[i]);
        }
    }
}
/*
 * liveness analysis: conditional branch: all temps are dead,
 * globals and local temps should be synced.
 */
static void la_bb_sync(TCGContext *s, int ng, int nt)
{
    la_global_sync(s, ng);

    for (int i = ng; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_LOCAL:
            state = ts->state;
            ts->state = state | TS_MEM;
            if (state != TS_DEAD) {
                continue;
            }
            break;
        case TEMP_NORMAL:
            s->temps[i].state = TS_DEAD;
            break;
        case TEMP_CONST:
            continue;
        default:
            g_assert_not_reached();
        }
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: sync globals back to memory and kill.  */
static void la_global_kill(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; i++) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: note live globals crossing calls.  */
static void la_cross_call(TCGContext *s, int nt)
{
    TCGRegSet mask = ~tcg_target_call_clobber_regs;
    int i;

    for (i = 0; i < nt; i++) {
        TCGTemp *ts = &s->temps[i];
        if (!(ts->state & TS_DEAD)) {
            TCGRegSet *pset = la_temp_pref(ts);
            TCGRegSet set = *pset;

            set &= mask;
            /* If the combination is not possible, restart.  */
            if (set == 0) {
                set = tcg_target_available_regs[ts->type] & mask;
            }
            *pset = set;
        }
    }
}
/* Liveness analysis: update the opc_arg_life array to tell if a
   given input argument is dead.  Instructions updating dead
   temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB.  */
    la_func_end(s, nb_globals, nb_temps);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;
                int nb_call_regs;

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);
                call_flags = op->args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead.  */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);

                    /* Not used -- it will be tcg_target_call_oarg_regs[i].  */
                    op->output_pref[i] = 0;
                }

                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper.  */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts && ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs.  */
                la_cross_call(s, nb_temps);

                nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);

                /* Input arguments are live for preceding opcodes.  */
                for (i = 0; i < nb_iargs; i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts && ts->state & TS_DEAD) {
                        /* For those arguments that die, and will be allocated
                         * in registers, clear the register set for that arg,
                         * to be filled in below.  For args that will be on
                         * the stack, reset to any available reg.
                         */
                        *la_temp_pref(ts)
                            = (i < nb_call_regs ? 0 :
                               tcg_target_available_regs[ts->type]);
                        ts->state &= ~TS_DEAD;
                    }
                }

                /* For each input argument, add its input register to prefs.
                   If a temp is used once, this produces a single set bit.  */
                for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts) {
                        tcg_regset_set_reg(*la_temp_pref(ts),
                                           tcg_target_call_iarg_regs[i]);
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit.  */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end.  */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live.  */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead.  */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live.  */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed.  */
                op->output_pref[i] = *la_temp_pref(ts);

                /* Output args are dead.  */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update.  */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_COND_BRANCH) {
                la_bb_sync(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type.  */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand.  */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward.  */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->regs;
                    if (ct->ialias) {
                        set &= op->output_pref[ct->alias_index];
                    }
                    /* If the combination is not possible, restart.  */
                    if (set == 0) {
                        set = ct->regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}
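
/*
 * A global marked indirect_reg does not live in a host register; its
 * canonical home is memory reached through another temp (its mem_base).
 * The pass below pairs each such global with a shadow "direct" temp,
 * rewrites ops to use the shadow, and inserts explicit ld/st ops where
 * the value must be reloaded or written back.
 */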
/* Liveness analysis: Convert indirect regs to direct temporaries.  */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global.  */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead.  */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = op->args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_COND_BRANCH) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts && arg_ts->state == TS_DEAD) {
                    TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc);

                    lop->args[0] = temp_arg(dir_ts);
                    lop->args[1] = temp_arg(arg_ts->mem_base);
                    lop->args[2] = arg_ts->mem_offset;

                    /* Loaded, but synced with memory.  */
                    arg_ts->state = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts) {
                    op->args[i] = temp_arg(dir_ts);
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        arg_ts->state = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available.  */
        if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
            arg_ts = arg_temp(op->args[0]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[0] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified.  */
                arg_ts->state = 0;

                if (NEED_SYNC_ARG(0)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc);
                    TCGTemp *out_ts = dir_ts;

                    if (IS_DEAD_ARG(0)) {
                        out_ts = arg_temp(op->args[1]);
                        arg_ts->state = TS_DEAD;
                        tcg_op_remove(s, op);
                    } else {
                        arg_ts->state = TS_MEM;
                    }

                    sop->args[0] = temp_arg(out_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;
                } else {
                    tcg_debug_assert(!IS_DEAD_ARG(0));
                }
            }
        } else {
            for (i = 0; i < nb_oargs; i++) {
                arg_ts = arg_temp(op->args[i]);
                dir_ts = arg_ts->state_ptr;
                if (!dir_ts) {
                    continue;
                }
                op->args[i] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified.  */
                arg_ts->state = 0;

                /* Sync outputs upon their last write.  */
                if (NEED_SYNC_ARG(i)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc);

                    sop->args[0] = temp_arg(dir_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;

                    arg_ts->state = TS_MEM;
                }
                /* Drop outputs that are dead.  */
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }
    }

    return changes;
}
#ifdef CONFIG_DEBUG_TCG
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for(i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf(" %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        switch(ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" PRIx64, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}

static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG
            && ts->kind != TEMP_FIXED
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
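
/*
 * A temp's val_type says where its current value lives:
 *   TEMP_VAL_REG   - in ts->reg, with s->reg_to_temp[] pointing back;
 *   TEMP_VAL_MEM   - only in its canonical memory slot;
 *   TEMP_VAL_CONST - a constant not yet materialized anywhere;
 *   TEMP_VAL_DEAD  - no live value at all.
 * The helpers below implement the transitions between these states.
 */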
/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead.  */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    TCGTempVal new_type;

    switch (ts->kind) {
    case TEMP_FIXED:
        return;
    case TEMP_GLOBAL:
    case TEMP_LOCAL:
        new_type = TEMP_VAL_MEM;
        break;
    case TEMP_NORMAL:
        new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
        break;
    case TEMP_CONST:
        new_type = TEMP_VAL_CONST;
        break;
    default:
        g_assert_not_reached();
    }
    if (ts->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ts->reg] = NULL;
    }
    ts->val_type = new_type;
}
/* Mark a temporary as dead.  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    if (!temp_readonly(ts) && !ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly.  */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, 0, -1);
    }
}
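
/*
 * Allocation strategy: first look for a register that is both free and
 * preferred, then any free register in the target's allocation order,
 * and only then spill an occupied one, again trying the preferred set
 * first.  Spilling is cheap here because temp_sync leaves the temp
 * reloadable from its memory slot.
 */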
/**
 * tcg_reg_alloc:
 * @required_regs: Set of registers in which we must allocate.
 * @allocated_regs: Set of registers which must be avoided.
 * @preferred_regs: Set of registers we should prefer.
 * @rev: True if we search the registers in "indirect" order.
 *
 * The allocated register must be in @required_regs & ~@allocated_regs,
 * but if we can put it in @preferred_regs we may save a move later.
 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
                            TCGRegSet allocated_regs,
                            TCGRegSet preferred_regs, bool rev)
{
    int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    reg_ct[1] = required_regs & ~allocated_regs;
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    /* Skip the preferred_regs option if it cannot be satisfied,
       or if the preference made no difference.  */
    f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* Try free registers, preferences first.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            if (s->reg_to_temp[reg] == NULL) {
                return reg;
            }
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (s->reg_to_temp[reg] == NULL &&
                    tcg_regset_test_reg(set, reg)) {
                    return reg;
                }
            }
        }
    }

    /* We must spill something.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (tcg_regset_test_reg(set, reg)) {
                    tcg_reg_free(s, reg, allocated_regs);
                    return reg;
                }
            }
        }
    }

    tcg_abort();
}
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        if (ts->type <= TCG_TYPE_I64) {
            tcg_out_movi(s, ts->type, reg, ts->val);
        } else {
            uint64_t val = ts->val;
            MemOp vece = MO_64;

            /*
             * Find the minimal vector element that matches the constant.
             * The targets will, in general, have to do this search anyway,
             * do this generically.
             */
            if (val == dup_const(MO_8, val)) {
                vece = MO_8;
            } else if (val == dup_const(MO_16, val)) {
                vece = MO_16;
            } else if (val == dup_const(MO_32, val)) {
                vece = MO_32;
            }

            tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
        }
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory. Keep a tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
}
/* save globals to their canonical location and assume they can be
   modified by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}
/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->kind == TEMP_FIXED
                         || ts->mem_coherent);
    }
}
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];

        switch (ts->kind) {
        case TEMP_LOCAL:
            temp_save(s, ts, allocated_regs);
            break;
        case TEMP_NORMAL:
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            /* Similarly, we should have freed any allocated register. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
            break;
        default:
            g_assert_not_reached();
        }
    }

    save_globals(s, allocated_regs);
}
/*
 * At a conditional branch, we assume all temporaries are dead and
 * all globals and local temps are synced to their location.
 */
static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
{
    sync_globals(s, allocated_regs);

    for (int i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        /*
         * The liveness analysis already ensures that temps are dead.
         * Keep tcg_debug_asserts for safety.
         */
        switch (ts->kind) {
        case TEMP_LOCAL:
            tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
            break;
        case TEMP_NORMAL:
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            break;
        default:
            g_assert_not_reached();
        }
    }
}
/*
 * Specialized code generation for INDEX_op_mov_* with a constant.
 */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    /* The movi is not explicitly generated here.  */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
/*
 * Specialized code generation for INDEX_op_mov_*.
 */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs, preferred_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    allocated_regs = s->reserved_regs;
    preferred_regs = op->output_pref[0];
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype],
                  allocated_regs, preferred_regs);
    }

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (IS_DEAD_ARG(0)) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else {
        if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, preferred_regs,
                                         ots->indirect_base);
            }
            if (!tcg_out_mov(s, otype, ots->reg, ts->reg)) {
                /*
                 * Cross register class move not supported.
                 * Store the source register into the destination slot
                 * and leave the destination temp as TEMP_VAL_MEM.
                 */
                assert(!temp_readonly(ots));
                if (!ts->mem_allocated) {
                    temp_allocate_frame(s, ots);
                }
                tcg_out_st(s, ts->type, ts->reg,
                           ots->mem_base->reg, ots->mem_offset);
                ots->mem_coherent = 1;
                temp_free_or_dead(s, ots, -1);
                return;
            }
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0, 0);
        }
    }
}
/*
 * Specialized code generation for INDEX_op_dup_vec.
 */
static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet dup_out_regs, dup_in_regs;
    TCGTemp *its, *ots;
    TCGType itype, vtype;
    intptr_t endian_fixup;
    unsigned vece;
    bool ok;

    ots = arg_temp(op->args[0]);
    its = arg_temp(op->args[1]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    itype = its->type;
    vece = TCGOP_VECE(op);
    vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    if (its->val_type == TEMP_VAL_CONST) {
        /* Propagate constant via movi -> dupi.  */
        tcg_target_ulong val = its->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, its);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, op->output_pref[0]);
        return;
    }

    dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
    dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;

    /* Allocate the output register now.  */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;

        if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
            /* Make sure to not spill the input register. */
            tcg_regset_set_reg(allocated_regs, its->reg);
        }
        ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                                 op->output_pref[0], ots->indirect_base);
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
    }

    switch (its->val_type) {
    case TEMP_VAL_REG:
        /*
         * The dup constraints must be broad, covering all possible VECE.
         * However, tcg_op_dup_vec() gets to see the VECE and we allow it
         * to fail, indicating that extra moves are required for that case.
         */
        if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
            if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
                goto done;
            }
            /* Try again from memory or a vector input register.  */
        }
        if (!its->mem_coherent) {
            /*
             * The input register is not synced, and so an extra store
             * would be required to use memory.  Attempt an integer-vector
             * register move first.  We do not have a TCGRegSet for this.
             */
            if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
                break;
            }
            /* Sync the temp back to its slot and load from there.  */
            temp_sync(s, its, s->reserved_regs, 0, 0);
        }
        /* fall through */

    case TEMP_VAL_MEM:
#ifdef HOST_WORDS_BIGENDIAN
        endian_fixup = itype == TCG_TYPE_I32 ? 4 : 8;
        endian_fixup -= 1 << vece;
#else
        endian_fixup = 0;
#endif
        if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
                             its->mem_offset + endian_fixup)) {
            goto done;
        }
        tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
        break;

    default:
        g_assert_not_reached();
    }

    /* We now have a vector input register, so dup must succeed. */
    ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
    tcg_debug_assert(ok);

 done:
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, its);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, 0);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, o_preferred_regs;

        i = def->args_ct[nb_oargs + k].sort_index;
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        i_preferred_regs = o_preferred_regs = 0;
        if (arg_ct->ialias) {
            o_preferred_regs = op->output_pref[arg_ct->alias_index];

            /*
             * If the input is readonly, then it cannot also be an
             * output and aliased to itself.  If the input is not
             * dead after the instruction, we must allocate a new
             * register and move it.
             */
            if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
                goto allocate_in_reg;
            }

            /*
             * Check if the current register has already been allocated
             * for another input aliased to an output.
             */
            if (ts->val_type == TEMP_VAL_REG) {
                reg = ts->reg;
                for (int k2 = 0; k2 < k; k2++) {
                    int i2 = def->args_ct[nb_oargs + k2].sort_index;
                    if (def->args_ct[i2].ialias && reg == new_args[i2]) {
                        goto allocate_in_reg;
                    }
                }
            }
            i_preferred_regs = o_preferred_regs;
        }

        temp_load(s, ts, arg_ct->regs, i_allocated_regs, i_preferred_regs);
        reg = ts->reg;

        if (!tcg_regset_test_reg(arg_ct->regs, reg)) {
 allocate_in_reg:
            /*
             * Allocate a new register matching the constraint
             * and move the temporary register into it.
             */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      i_allocated_regs, 0);
            reg = tcg_reg_alloc(s, arg_ct->regs, i_allocated_regs,
                                o_preferred_regs, ts->indirect_base);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_COND_BRANCH) {
        tcg_reg_alloc_cbranch(s, i_allocated_regs);
    } else if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for(k = 0; k < nb_oargs; k++) {
            i = def->args_ct[k].sort_index;
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified.  */
            tcg_debug_assert(!temp_readonly(ts));

            if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
                reg = new_args[arg_ct->alias_index];
            } else if (arg_ct->newreg) {
                reg = tcg_reg_alloc(s, arg_ct->regs,
                                    i_allocated_regs | o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            } else {
                reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            /*
             * Temp value is modified, so the value kept in memory is
             * potentially not the same.
             */
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified.  */
        tcg_debug_assert(!temp_readonly(ts));

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGTemp *ots, *itsl, *itsh;
    TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
    tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
    tcg_debug_assert(TCGOP_VECE(op) == MO_64);

    ots = arg_temp(op->args[0]);
    itsl = arg_temp(op->args[1]);
    itsh = arg_temp(op->args[2]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!temp_readonly(ots));

    /* Allocate the output register now.  */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGRegSet dup_out_regs =
            tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;

        /* Make sure to not spill the input registers. */
        if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsl->reg);
        }
        if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsh->reg);
        }

        ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                                 op->output_pref[0], ots->indirect_base);
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
    }

    /* Promote dup2 of immediates to dupi_vec. */
    if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
        uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
        MemOp vece = MO_64;

        if (val == dup_const(MO_8, val)) {
            vece = MO_8;
        } else if (val == dup_const(MO_16, val)) {
            vece = MO_16;
        } else if (val == dup_const(MO_32, val)) {
            vece = MO_32;
        }

        tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
        goto done;
    }

    /* If the two inputs form one 64-bit value, try dupm_vec. */
    if (itsl + 1 == itsh && itsl->base_type == TCG_TYPE_I64) {
        if (!itsl->mem_coherent) {
            temp_sync(s, itsl, s->reserved_regs, 0, 0);
        }
        if (!itsh->mem_coherent) {
            temp_sync(s, itsh, s->reserved_regs, 0, 0);
        }
#ifdef HOST_WORDS_BIGENDIAN
        TCGTemp *its = itsh;
#else
        TCGTemp *its = itsl;
#endif
        if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
                             its->mem_base->reg, its->mem_offset)) {
            goto done;
        }
    }

    /* Fall back to generic expansion. */
    return false;

 done:
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, itsl);
    }
    if (IS_DEAD_ARG(2)) {
        temp_dead(s, itsh);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
    return true;
}
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif
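
/*
 * STACK_DIR flips the sign of a stack-slot offset on hosts whose stack
 * grows upward, where successive argument slots sit at decreasing
 * addresses; on the usual grow-down hosts it is the identity.
 */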
static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
    flags = op->args[nb_oargs + nb_iargs + 1];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = op->args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs, 0);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    allocated_regs = s->reserved_regs;
    for (i = 0; i < nb_regs; i++) {
        arg = op->args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            reg = tcg_target_call_iarg_regs[i];

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_reg_free(s, reg, allocated_regs);
                    if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                        /*
                         * Cross register class move not supported.  Sync the
                         * temp back to its slot and load from there.
                         */
                        temp_sync(s, ts, allocated_regs, 0, 0);
                        tcg_out_ld(s, ts->type, reg,
                                   ts->mem_base->reg, ts->mem_offset);
                    }
                }
            } else {
                TCGRegSet arg_set = 0;

                tcg_reg_free(s, reg, allocated_regs);
                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs, 0);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for(i = 0; i < nb_oargs; i++) {
        arg = op->args[i];
        ts = arg_temp(arg);

        /* ENV should not be modified.  */
        tcg_debug_assert(!temp_readonly(ts));

        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);
        if (ts->val_type == TEMP_VAL_REG) {
            s->reg_to_temp[ts->reg] = NULL;
        }
        ts->val_type = TEMP_VAL_REG;
        ts->reg = reg;
        ts->mem_coherent = 0;
        s->reg_to_temp[reg] = ts;
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
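
/*
 * Each TCGContext accumulates its own TCGProfile; a snapshot sums the
 * additive counters (PROF_ADD) and takes the maximum of the watermark
 * counters (PROF_MAX) across all contexts, e.g. op_count adds up while
 * op_count_max keeps the largest per-TB value seen anywhere.
 */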
#ifdef CONFIG_PROFILER

/* avoid copy/paste errors */
#define PROF_ADD(to, from, field)                       \
    do {                                                \
        (to)->field += qatomic_read(&((from)->field));  \
    } while (0)

#define PROF_MAX(to, from, field)                                       \
    do {                                                                \
        typeof((from)->field) val__ = qatomic_read(&((from)->field));   \
        if (val__ > (to)->field) {                                      \
            (to)->field = val__;                                        \
        }                                                               \
    } while (0)

/* Pass in a zero'ed @prof */
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
    unsigned int i;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        const TCGProfile *orig = &s->prof;

        if (counters) {
            PROF_ADD(prof, orig, cpu_exec_time);
            PROF_ADD(prof, orig, tb_count1);
            PROF_ADD(prof, orig, tb_count);
            PROF_ADD(prof, orig, op_count);
            PROF_MAX(prof, orig, op_count_max);
            PROF_ADD(prof, orig, temp_count);
            PROF_MAX(prof, orig, temp_count_max);
            PROF_ADD(prof, orig, del_op_count);
            PROF_ADD(prof, orig, code_in_len);
            PROF_ADD(prof, orig, code_out_len);
            PROF_ADD(prof, orig, search_out_len);
            PROF_ADD(prof, orig, interm_time);
            PROF_ADD(prof, orig, code_time);
            PROF_ADD(prof, orig, la_time);
            PROF_ADD(prof, orig, opt_time);
            PROF_ADD(prof, orig, restore_count);
            PROF_ADD(prof, orig, restore_time);
        }
        if (table) {
            int i;

            for (i = 0; i < NB_OPS; i++) {
                PROF_ADD(prof, orig, table_op_count[i]);
            }
        }
    }
}

#undef PROF_ADD
#undef PROF_MAX

static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}

static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}
void tcg_dump_op_count(void)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name,
                    prof.table_op_count[i]);
    }
}

int64_t tcg_cpu_exec_time(void)
{
    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
    unsigned int i;
    int64_t ret = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        const TCGProfile *prof = &s->prof;

        ret += qatomic_read(&prof->cpu_exec_time);
    }
    return ret;
}
#else
void tcg_dump_op_count(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}

int64_t tcg_cpu_exec_time(void)
{
    error_report("%s: TCG profiler not compiled", __func__);
    exit(EXIT_FAILURE);
}
#endif
4572 int tcg_gen_code(TCGContext
*s
, TranslationBlock
*tb
)
4574 #ifdef CONFIG_PROFILER
4575 TCGProfile
*prof
= &s
->prof
;
4580 #ifdef CONFIG_PROFILER
4584 QTAILQ_FOREACH(op
, &s
->ops
, link
) {
4587 qatomic_set(&prof
->op_count
, prof
->op_count
+ n
);
4588 if (n
> prof
->op_count_max
) {
4589 qatomic_set(&prof
->op_count_max
, n
);
4593 qatomic_set(&prof
->temp_count
, prof
->temp_count
+ n
);
4594 if (n
> prof
->temp_count_max
) {
4595 qatomic_set(&prof
->temp_count_max
, n
);
#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        FILE *logfile = qemu_log_lock();
        qemu_log("OP:\n");
        tcg_dump_ops(s, false);
        qemu_log("\n");
        qemu_log_unlock(logfile);
    }
#endif

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted.  */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && l->refs) {
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
                error = true;
            }
        }
        assert(!error);
    }
#endif
#ifdef CONFIG_PROFILER
    qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    reachable_code_pass(s);
    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(tb->pc))) {
            FILE *logfile = qemu_log_lock();
            qemu_log("OP before indirect lowering:\n");
            tcg_dump_ops(s, false);
            qemu_log("\n");
            qemu_log_unlock(logfile);
        }
#endif
        /* Replace indirect temps with direct temps.  */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness.  */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        FILE *logfile = qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s, true);
        qemu_log("\n");
        qemu_log_unlock(logfile);
    }
#endif
    tcg_reg_alloc_start(s);

    /*
     * Reset the buffer pointers when restarting after overflow.
     * TODO: Move this into translate-all.c with the rest of the
     * buffer management.  Having only this done here is confusing.
     */
    s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
    s->code_ptr = s->code_buf;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif
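
    /*
     * Both lists collect fixups emitted by the backend while ops are
     * being generated; they are consumed by the *_finalize() calls in
     * the TB-finalization block at the end of this function.
     */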
    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_dup_vec:
            tcg_reg_alloc_dup(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset.  */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]));
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        case INDEX_op_dup2_vec:
            if (tcg_reg_alloc_dup2(s, op)) {
                break;
            }
            /* fall through */
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
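        /*
         * The high-water mark is set a fixed slack below the true end
         * of the region when the buffer is carved up, sized so that no
         * single op can emit more host code than the slack; that
         * invariant is what makes a single post-op check sufficient.
         */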
        /* Test for TB overflow, as seen by gen_insn_end_off.  */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

#ifndef CONFIG_TCG_INTERPRETER
    /* flush instruction cache */
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf,
                        tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
#endif

    return tcg_current_code_size(s);
}
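
/*
 * Negative returns above ask the caller in translate-all.c to restart
 * code generation, typically after making more room in the code buffer
 * or retranslating a smaller TB; a non-negative return is the number
 * of bytes of host code emitted for this TB.
 */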
#ifdef CONFIG_PROFILER
void tcg_dump_info(void)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    qemu_printf("JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    qemu_printf("translated TBs      %" PRId64 " (aborted=%" PRId64
                " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    qemu_printf("avg ops/TB          %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    qemu_printf("deleted ops/TB      %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    qemu_printf("avg temps/TB        %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    qemu_printf("avg host code/TB    %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    qemu_printf("avg search data/TB  %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    qemu_printf("cycles/op           %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    qemu_printf("cycles/in byte      %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    qemu_printf("cycles/out byte     %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    qemu_printf("cycles/search byte     %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    qemu_printf("  gen_interm time   %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    qemu_printf("  gen_code time     %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    qemu_printf("optim./code time    %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    qemu_printf("liveness/code time  %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    qemu_printf("cpu_restore count   %" PRId64 "\n",
                s->restore_count);
    qemu_printf("  avg cycles        %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}
#endif
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/
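
/*
 * For example, a backend's tcg_register_jit() is typically just the
 * following, where debug_frame is the static DebugFrame blob the
 * backend built to describe its prologue:
 *
 *     void tcg_register_jit(const void *buf, size_t buf_size)
 *     {
 *         tcg_register_jit_int(buf, buf_size,
 *                              &debug_frame, sizeof(debug_frame));
 *     }
 */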
/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};
struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
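
/*
 * The handshake, per the GDB JIT interface documentation: the debugger
 * sets a breakpoint on __jit_debug_register_code(); after we update
 * __jit_debug_descriptor and call it, GDB reads relevant_entry and
 * loads the in-memory ELF image found at symfile_addr.
 */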
/* End GDB interface.  */
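
/*
 * find_string() below assumes @str is always present in @strtab, which
 * holds for the fixed .str table in the image template; there is
 * deliberately no not-found exit from the loop.
 */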
static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };
    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;
    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;
#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities.  */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            /* fwrite returns the number of items written, so success
               with nmemb == 1 is a return value of 1.  */
            if (fwrite(img, img_size, 1, f) != 1) {
                /* Avoid stupid unused return value warning for fwrite.  */
            }
            fclose(f);
        }
    }
#endif
    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;
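
    /*
     * Per the GDB JIT docs the symfile must stay valid while the entry
     * is registered, so @img is intentionally never freed.
     */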
    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier.  */

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(const void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */
#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif