/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB.  */
#undef DEBUG_JIT

#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions.  Currently they are used for qemu_ld/st
   instructions.  */
#define NO_CPU_IO_DEFS
#include "cpu.h"

#include "exec/cpu-common.h"
#include "exec/exec-all.h"

#include "tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "sysemu/sysemu.h"
/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
#if TCG_TARGET_MAYBE_vec
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                           unsigned vece, const TCGArg *args,
                           const int *const_args);
#else
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                                  unsigned vece, const TCGArg *args,
                                  const int *const_args)
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static bool tcg_out_ldst_finalize(TCGContext *s);
#endif
#define TCG_HIGHWATER 1024

static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;
/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n; /* number of regions */
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
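/*
 * Worked example (illustrative, not upstream text): with a page-aligned
 * 64 MB buffer, 4 KB pages, and 16 regions, each region's stride is 4 MB
 * while its usable size is 4 MB - 4 KB, since the last page of every
 * region is turned into a guard page by tcg_region_init() below.
 */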
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
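/*
 * Example (illustrative only): on a backend whose insn unit is 4 bytes,
 * a 64-bit constant can equivalently be emitted as two 32-bit halves;
 * tcg_out64() above achieves the same effect with a single memcpy:
 *
 *     tcg_out32(s, (uint32_t)v);          // low half (little-endian hosts)
 *     tcg_out32(s, (uint32_t)(v >> 32));  // high half
 */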
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place.  The caller may not have
           written the initial value.  */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    *l = (TCGLabel){
        .id = s->nb_labels++
    };

    return l;
}
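/*
 * Usage sketch (illustrative): front ends allocate a label, emit forward
 * branches to it, and finally bind it, at which point any pending
 * relocations recorded by tcg_out_reloc() are resolved:
 *
 *     TCGLabel *l = gen_new_label();
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, val, 0, l);  // forward branch
 *     ...                                           // code to skip
 *     gen_set_label(l);                             // bind the label here
 */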
#include "tcg-target.inc.c"
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}
static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}
static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}
/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}
/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}
/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        bool err = tcg_region_initial_alloc__locked(s);

        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);
}
#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
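/*
 * Worked example (illustrative): with max_cpus == 8 and a 256 MB
 * code_gen_buffer, the first iteration tries 8 regions per thread:
 * 256 MB / (8 * 8) = 4 MB >= 2 MB, so tcg_n_regions() returns 64.
 */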
/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = atomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < max_cpus);
    atomic_set(&tcg_ctxs[n], s);

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */
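/*
 * Usage note (illustrative): in softmmu MTTCG each vCPU thread is expected
 * to call tcg_register_thread() once during its own setup, before it
 * translates any code; the parent thread must already have run
 * tcg_context_init() and tcg_region_init().
 */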
/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}
/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
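/*
 * Worked example (illustrative): for a page-aligned 64 MB buffer split
 * into 16 regions with 4 KB pages, guard_size is 4 KB, so the reported
 * capacity is 64 MB minus 16 * (4 KB + TCG_HIGHWATER) bytes.
 */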
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for the
     * reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
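/*
 * Example (illustrative): with a 64-byte icache line and code_gen_ptr at
 * 0x1010, the TB header is placed at 0x1040 and the translated code then
 * starts at the next 64-byte boundary after the header, so the two never
 * share a cache line.
 */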
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;
    s->code_gen_prologue = buf0;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        bool ok = tcg_out_pool_finalize(s);
        tcg_debug_assert(ok);
    }
#endif

    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
}
static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}
static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
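/*
 * Usage sketch (illustrative; CPUFooState and "reg" are hypothetical):
 * front ends create their globals once at init time, e.g.
 *
 *     cpu_reg = tcg_global_mem_new_i32(cpu_env,
 *                                      offsetof(CPUFooState, reg), "reg");
 */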
TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}
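/*
 * Usage sketch (illustrative): translation code allocates and releases
 * temporaries through the typed wrappers built on this function, e.g.
 *
 *     TCGv_i32 t = tcg_temp_new_i32();
 *     tcg_gen_add_i32(t, a, b);
 *     tcg_temp_free_i32(t);
 */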
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}
/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}
TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}
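/*
 * Usage sketch (illustrative): a constant temp behaves like any other
 * temp and must be freed by the caller, e.g.
 *
 *     TCGv_i32 c = tcg_const_i32(0x10);
 *     tcg_gen_and_i32(dst, src, c);
 *     tcg_temp_free_i32(c);
 */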
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupi_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
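/*
 * Example (illustrative): generic expansion code can use this predicate
 * to choose between a native opcode and a fallback, e.g.
 *
 *     if (tcg_op_supported(INDEX_op_ctpop_i32)) {
 *         // emit ctpop directly
 *     } else {
 *         // expand population count via shifts and masks
 *     }
 */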
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;
    TCGTemp *ts;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
    }
    for (n = s->nb_temps; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    if (ts->temp_global) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
/* Find helper name.  */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (helper_table) {
        TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
void tcg_dump_ops(TCGContext *s)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            col += qemu_log("\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }
        if (op->life) {
            unsigned life = op->life;

            for (; col < 48; ++col) {
                putc(' ', qemu_logfile);
            }

            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }
        qemu_log("\n");
    }
}
/* we give more priority to constraints with less registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;
    int i, n;

    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG)) {
            return 0;
        }
        n = 0;
        for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i)) {
                n++;
            }
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for(i = 0; i < n; i++) {
        def->sorted_args[start + i] = start + i;
    }
    if (n <= 1) {
        return;
    }
    for(i = 0; i < n - 1; i++) {
        for(j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just an heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);

#ifdef CONFIG_PROFILER
    atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}
static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op;

    if (likely(QTAILQ_EMPTY(&s->free_ops))) {
        op = tcg_malloc(sizeof(TCGOp));
    } else {
        op = QTAILQ_FIRST(&s->free_ops);
        QTAILQ_REMOVE(&s->free_ops, op, link);
    }
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;

    return op;
}

TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, int nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, int nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
#define TS_DEAD  1
#define TS_MEM   2

#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void tcg_la_func_end(TCGContext *s)
{
    int ng = s->nb_globals;
    int nt = s->nb_temps;
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
    }
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void tcg_la_bb_end(TCGContext *s)
{
    int ng = s->nb_globals;
    int nt = s->nb_temps;
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = (s->temps[i].temp_local
                             ? TS_DEAD | TS_MEM
                             : TS_DEAD);
    }
}
2048 /* Liveness analysis : update the opc_arg_life array to tell if a
2049 given input arguments is dead. Instructions updating dead
2050 temporaries are removed. */
2051 static void liveness_pass_1(TCGContext
*s
)
2053 int nb_globals
= s
->nb_globals
;
2054 TCGOp
*op
, *op_prev
;
2058 QTAILQ_FOREACH_REVERSE_SAFE(op
, &s
->ops
, TCGOpHead
, link
, op_prev
) {
2059 int i
, nb_iargs
, nb_oargs
;
2060 TCGOpcode opc_new
, opc_new2
;
2062 TCGLifeData arg_life
= 0;
2064 TCGOpcode opc
= op
->opc
;
2065 const TCGOpDef
*def
= &tcg_op_defs
[opc
];
2072 nb_oargs
= TCGOP_CALLO(op
);
2073 nb_iargs
= TCGOP_CALLI(op
);
2074 call_flags
= op
->args
[nb_oargs
+ nb_iargs
+ 1];
2076 /* pure functions can be removed if their result is unused */
2077 if (call_flags
& TCG_CALL_NO_SIDE_EFFECTS
) {
2078 for (i
= 0; i
< nb_oargs
; i
++) {
2079 arg_ts
= arg_temp(op
->args
[i
]);
2080 if (arg_ts
->state
!= TS_DEAD
) {
2081 goto do_not_remove_call
;
2088 /* output args are dead */
2089 for (i
= 0; i
< nb_oargs
; i
++) {
2090 arg_ts
= arg_temp(op
->args
[i
]);
2091 if (arg_ts
->state
& TS_DEAD
) {
2092 arg_life
|= DEAD_ARG
<< i
;
2094 if (arg_ts
->state
& TS_MEM
) {
2095 arg_life
|= SYNC_ARG
<< i
;
2097 arg_ts
->state
= TS_DEAD
;
2100 if (!(call_flags
& (TCG_CALL_NO_WRITE_GLOBALS
|
2101 TCG_CALL_NO_READ_GLOBALS
))) {
2102 /* globals should go back to memory */
2103 for (i
= 0; i
< nb_globals
; i
++) {
2104 s
->temps
[i
].state
= TS_DEAD
| TS_MEM
;
2106 } else if (!(call_flags
& TCG_CALL_NO_READ_GLOBALS
)) {
2107 /* globals should be synced to memory */
2108 for (i
= 0; i
< nb_globals
; i
++) {
2109 s
->temps
[i
].state
|= TS_MEM
;
2113 /* record arguments that die in this helper */
2114 for (i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
2115 arg_ts
= arg_temp(op
->args
[i
]);
2116 if (arg_ts
&& arg_ts
->state
& TS_DEAD
) {
2117 arg_life
|= DEAD_ARG
<< i
;
2120 /* input arguments are live for preceding opcodes */
2121 for (i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
2122 arg_ts
= arg_temp(op
->args
[i
]);
2124 arg_ts
->state
&= ~TS_DEAD
;
2130 case INDEX_op_insn_start
:
2132 case INDEX_op_discard
:
2133 /* mark the temporary as dead */
2134 arg_temp(op
->args
[0])->state
= TS_DEAD
;
2137 case INDEX_op_add2_i32
:
2138 opc_new
= INDEX_op_add_i32
;
2140 case INDEX_op_sub2_i32
:
2141 opc_new
= INDEX_op_sub_i32
;
2143 case INDEX_op_add2_i64
:
2144 opc_new
= INDEX_op_add_i64
;
2146 case INDEX_op_sub2_i64
:
2147 opc_new
= INDEX_op_sub_i64
;
2151 /* Test if the high part of the operation is dead, but not
2152 the low part. The result can be optimized to a simple
2153 add or sub. This happens often for x86_64 guest when the
2154 cpu mode is set to 32 bit. */
2155 if (arg_temp(op
->args
[1])->state
== TS_DEAD
) {
2156 if (arg_temp(op
->args
[0])->state
== TS_DEAD
) {
2159 /* Replace the opcode and adjust the args in place,
2160 leaving 3 unused args at the end. */
2161 op
->opc
= opc
= opc_new
;
2162 op
->args
[1] = op
->args
[2];
2163 op
->args
[2] = op
->args
[4];
2164 /* Fall through and mark the single-word operation live. */
2170 case INDEX_op_mulu2_i32
:
2171 opc_new
= INDEX_op_mul_i32
;
2172 opc_new2
= INDEX_op_muluh_i32
;
2173 have_opc_new2
= TCG_TARGET_HAS_muluh_i32
;
2175 case INDEX_op_muls2_i32
:
2176 opc_new
= INDEX_op_mul_i32
;
2177 opc_new2
= INDEX_op_mulsh_i32
;
2178 have_opc_new2
= TCG_TARGET_HAS_mulsh_i32
;
2180 case INDEX_op_mulu2_i64
:
2181 opc_new
= INDEX_op_mul_i64
;
2182 opc_new2
= INDEX_op_muluh_i64
;
2183 have_opc_new2
= TCG_TARGET_HAS_muluh_i64
;
2185 case INDEX_op_muls2_i64
:
2186 opc_new
= INDEX_op_mul_i64
;
2187 opc_new2
= INDEX_op_mulsh_i64
;
2188 have_opc_new2
= TCG_TARGET_HAS_mulsh_i64
;
2193 if (arg_temp(op
->args
[1])->state
== TS_DEAD
) {
2194 if (arg_temp(op
->args
[0])->state
== TS_DEAD
) {
2195 /* Both parts of the operation are dead. */
2198 /* The high part of the operation is dead; generate the low. */
2199 op
->opc
= opc
= opc_new
;
2200 op
->args
[1] = op
->args
[2];
2201 op
->args
[2] = op
->args
[3];
2202 } else if (arg_temp(op
->args
[0])->state
== TS_DEAD
&& have_opc_new2
) {
2203 /* The low part of the operation is dead; generate the high. */
2204 op
->opc
= opc
= opc_new2
;
2205 op
->args
[0] = op
->args
[1];
2206 op
->args
[1] = op
->args
[2];
2207 op
->args
[2] = op
->args
[3];
2211 /* Mark the single-word operation live. */
2216 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
2217 nb_iargs
= def
->nb_iargs
;
2218 nb_oargs
= def
->nb_oargs
;
2220 /* Test if the operation can be removed because all
2221 its outputs are dead. We assume that nb_oargs == 0
2222 implies side effects */
2223 if (!(def
->flags
& TCG_OPF_SIDE_EFFECTS
) && nb_oargs
!= 0) {
2224 for (i
= 0; i
< nb_oargs
; i
++) {
2225 if (arg_temp(op
->args
[i
])->state
!= TS_DEAD
) {
2230 tcg_op_remove(s
, op
);
2233 /* output args are dead */
2234 for (i
= 0; i
< nb_oargs
; i
++) {
2235 arg_ts
= arg_temp(op
->args
[i
]);
2236 if (arg_ts
->state
& TS_DEAD
) {
2237 arg_life
|= DEAD_ARG
<< i
;
2239 if (arg_ts
->state
& TS_MEM
) {
2240 arg_life
|= SYNC_ARG
<< i
;
2242 arg_ts
->state
= TS_DEAD
;
2245 /* if end of basic block, update */
2246 if (def
->flags
& TCG_OPF_BB_END
) {
2248 } else if (def
->flags
& TCG_OPF_SIDE_EFFECTS
) {
2249 /* globals should be synced to memory */
2250 for (i
= 0; i
< nb_globals
; i
++) {
2251 s
->temps
[i
].state
|= TS_MEM
;
2255 /* record arguments that die in this opcode */
2256 for (i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
2257 arg_ts
= arg_temp(op
->args
[i
]);
2258 if (arg_ts
->state
& TS_DEAD
) {
2259 arg_life
|= DEAD_ARG
<< i
;
2262 /* input arguments are live for preceding opcodes */
2263 for (i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
2264 arg_temp(op
->args
[i
])->state
&= ~TS_DEAD
;
2269 op
->life
= arg_life
;
/* Liveness analysis: Convert indirect regs to direct temporaries.  */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global.  */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead.  */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }
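
    /* Note (added for clarity): an "indirect" global is one whose canonical
       home is reached through another temp (mem_base + mem_offset,
       typically off the env pointer) rather than a dedicated host register.
       Each one is shadowed by the direct temp stored in state_ptr, and
       explicit ld/st ops are inserted around its uses below.  */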
    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = op->args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }
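
        /* Illustrative example (added): a side-effect-free op such as
           add_i32 is treated here like a helper declared with
           TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS, while an
           op with TCG_OPF_SIDE_EFFECTS (e.g. a guest store) is treated
           like one declared with TCG_CALL_NO_WRITE_GLOBALS only.  */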
        /* Make sure that input arguments are available.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts && arg_ts->state == TS_DEAD) {
                    TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);

                    lop->args[0] = temp_arg(dir_ts);
                    lop->args[1] = temp_arg(arg_ts->mem_base);
                    lop->args[2] = arg_ts->mem_offset;

                    /* Loaded, but synced with memory.  */
                    arg_ts->state = TS_MEM;
                }
            }
        }
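
        /* Illustrative sketch (added; the names g_dir/g_base/g_ofs are
           hypothetical): if a dead indirect global g feeds
           "add_i32 t0, g, t1", the loop above inserts
           "ld_i32 g_dir, g_base, g_ofs" (g_dir taken from g->state_ptr)
           before the add; the replacement loop below then rewrites the
           add to consume g_dir.  */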
        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts) {
                    op->args[i] = temp_arg(dir_ts);
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        arg_ts->state = TS_DEAD;
                    }
                }
            }
        }
        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }
        /* Outputs become available.  */
        for (i = 0; i < nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (!dir_ts) {
                continue;
            }
            op->args[i] = temp_arg(dir_ts);
            changes = true;

            /* The output is now live and modified.  */
            arg_ts->state = 0;

            /* Sync outputs upon their last write.  */
            if (NEED_SYNC_ARG(i)) {
                TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_st_i32
                                  : INDEX_op_st_i64);
                TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);

                sop->args[0] = temp_arg(dir_ts);
                sop->args[1] = temp_arg(arg_ts->mem_base);
                sop->args[2] = arg_ts->mem_offset;

                arg_ts->state = TS_MEM;
            }
            /* Drop outputs that are dead.  */
            if (IS_DEAD_ARG(i)) {
                arg_ts->state = TS_DEAD;
            }
        }
    }

    return changes;
}
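
/* Note (added for clarity): tcg_gen_code() below runs this pass only when
   s->nb_indirects > 0, and re-runs liveness_pass_1() whenever it returns
   true, so the freshly inserted ld/st ops acquire life data of their own.  */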
#ifdef CONFIG_DEBUG_TCG
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for (i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        switch (ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}
static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}
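
/* Worked example (added): with an 8-byte tcg_target_long and
   current_frame_offset == 12, the rounding above yields (12 + 7) & ~7 == 16;
   the temp gets offset 16 relative to frame_temp and the next free offset
   becomes 24.  (Sparc64 skips the rounding because its stack pointer is
   biased by 2047.)  */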
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);

/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead.  */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (ts->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ts->reg] = NULL;
    }
    ts->val_type = (free_or_dead < 0
                    || ts->temp_local
                    || ts->temp_global
                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
}

/* Mark a temporary as dead.  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
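
/* Note (added for clarity): together these drive the TEMP_VAL_* state
   machine: freeing (negative) demotes a register-resident temp to its
   memory slot (TEMP_VAL_MEM), while killing a plain temp goes all the way
   to TEMP_VAL_DEAD; locals and globals fall back to their memory home
   either way.  */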
/* Sync a temporary to memory.  'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts,
                      TCGRegSet allocated_regs, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly.  */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, -1);
    }
}
/* Allocate a register belonging to reg1 & ~reg2 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
                            TCGRegSet allocated_regs, bool rev)
{
    int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    const int *order;
    TCGReg reg;
    TCGRegSet reg_ct;

    reg_ct = desired_regs & ~allocated_regs;
    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* first try free registers */
    for (i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL) {
            return reg;
        }
    }

    /* XXX: do better spill choice */
    for (i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg)) {
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        }
    }

    tcg_abort();
}
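
/* Note (added for clarity): allocation is two-phase -- first look for a
   register that both satisfies the constraint (desired_regs) and is
   currently unbound, and only then evict an acceptable occupied one via
   tcg_reg_free().  'rev' switches to indirect_reg_alloc_order, the
   alternate scan order used when allocating for indirect-base temps.  */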
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            ts->indirect_base);
        tcg_out_movi(s, ts->type, reg, ts->val);
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}
/* Save a temporary to memory.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory.  Keep a tcg_debug_assert for safety.  */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
}

/* save globals to their canonical location and assume they can be
   modified by the following code.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}
/* sync globals to their canonical location and assume they can be
   read by the following code.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->fixed_reg
                         || ts->mem_coherent);
    }
}
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location.  */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->temp_local) {
            temp_save(s, ts, allocated_regs);
        } else {
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety.  */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
        }
    }

    save_globals(s, allocated_regs);
}
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life)
{
    if (ots->fixed_reg) {
        /* For fixed registers, we do not do any constant propagation.  */
        tcg_out_movi(s, ots->type, ots->reg, val);
        return;
    }

    /* The movi is not explicitly generated here.  */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
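
/* Note (added for clarity): this is lazy constant materialization --
   unless the destination is a fixed register, no host movi is emitted
   here; the temp merely becomes TEMP_VAL_CONST and the value is
   materialized by a later temp_load() or temp_sync() that actually
   needs it.  */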
static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
{
    TCGTemp *ots = arg_temp(op->args[0]);
    tcg_target_ulong val = op->args[1];

    tcg_reg_alloc_do_movi(s, ots, val, op->life);
}
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    allocated_regs = s->reserved_regs;
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used.  */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
    }

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled).  */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else {
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill
                   the input one.  */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, ots->indirect_base);
            }
            tcg_out_mov(s, otype, ots->reg, ts->reg);
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0);
        }
    }
}
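
/* Illustrative example (added): for "mov_i32 t1, t0" where t0 dies at
   this op and neither temp is a fixed register, no host instruction is
   emitted at all -- t1 simply inherits t0's register through the
   "mov can be suppressed" path above.  */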
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        i = def->sorted_args[nb_oargs + k];
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            goto iarg_end;
        }

        temp_load(s, ts, arg_ct->u.regs, i_allocated_regs);

        if (arg_ct->ct & TCG_CT_IALIAS) {
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != op->args[arg_ct->alias_index]) {
                    goto allocate_in_reg;
                }
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }

                /* check if the current register has already been allocated
                   for another input aliased to an output */
                int k2, i2;
                for (k2 = 0; k2 < k; k2++) {
                    i2 = def->sorted_args[nb_oargs + k2];
                    if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
                        (new_args[i2] == ts->reg)) {
                        goto allocate_in_reg;
                    }
                }
            }
        }
        reg = ts->reg;
        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
                                ts->indirect_base);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    iarg_end:
        ;
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for (k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);
            if ((arg_ct->ct & TCG_CT_ALIAS)
                && !const_args[arg_ct->alias_index]) {
                reg = new_args[arg_ct->alias_index];
            } else if (arg_ct->ct & TCG_CT_NEWREG) {
                reg = tcg_reg_alloc(s, arg_ct->u.regs,
                                    i_allocated_regs | o_allocated_regs,
                                    ts->indirect_base);
            } else {
                /* if fixed register, we try to use it */
                reg = ts->reg;
                if (ts->fixed_reg &&
                    tcg_regset_test_reg(arg_ct->u.regs, reg)) {
                    goto oarg_end;
                }
                reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
                                    ts->indirect_base);
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            /* if a fixed register is used, then a move will be done
               afterwards */
            if (!ts->fixed_reg) {
                if (ts->val_type == TEMP_VAL_REG) {
                    s->reg_to_temp[ts->reg] = NULL;
                }
                ts->val_type = TEMP_VAL_REG;
                ts->reg = reg;
                /* temp value is modified, so the value kept in memory is
                   potentially not the same */
                ts->mem_coherent = 0;
                s->reg_to_temp[reg] = ts;
            }
        oarg_end:
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for (i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);
        reg = new_args[i];
        if (ts->fixed_reg && ts->reg != reg) {
            tcg_out_mov(s, ts->type, ts->reg, reg);
        }
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
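
/* Illustrative example (added; the exact constraint string is
   backend-specific): a backend constraining add_i32 as { "r", "0", "ri" }
   aliases the output to input 0.  If that input is still live after the
   op, the TCG_CT_IALIAS path above first copies it to a fresh register,
   so "t2 = t0 + t1" becomes, at the host level, roughly
   "mov r2, r0; add r2, r1".  */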
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif

static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
    flags = op->args[nb_oargs + nb_iargs + 1];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = op->args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    allocated_regs = s->reserved_regs;
    for (i = 0; i < nb_regs; i++) {
        arg = op->args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            reg = tcg_target_call_iarg_regs[i];
            tcg_reg_free(s, reg, allocated_regs);

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_out_mov(s, ts->type, reg, ts->reg);
                }
            } else {
                TCGRegSet arg_set = 0;

                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read.  */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for (i = 0; i < nb_oargs; i++) {
        arg = op->args[i];
        ts = arg_temp(arg);
        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);

        if (ts->fixed_reg) {
            if (ts->reg != reg) {
                tcg_out_mov(s, ts->type, ts->reg, reg);
            }
        } else {
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            if (NEED_SYNC_ARG(i)) {
                temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
            } else if (IS_DEAD_ARG(i)) {
                temp_dead(s, ts);
            }
        }
    }
}
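
/* Note (added for clarity): the first ARRAY_SIZE(tcg_target_call_iarg_regs)
   arguments travel in registers; the remainder are stored into the
   TCG_STATIC_CALL_ARGS_SIZE bytes reserved off TCG_REG_CALL_STACK by the
   prologue.  A call needing more stack than that aborts rather than
   growing the frame at runtime.  */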
#ifdef CONFIG_PROFILER

/* avoid copy/paste errors */
#define PROF_ADD(to, from, field)                       \
    do {                                                \
        (to)->field += atomic_read(&((from)->field));   \
    } while (0)

#define PROF_MAX(to, from, field)                                       \
    do {                                                                \
        typeof((from)->field) val__ = atomic_read(&((from)->field));    \
        if (val__ > (to)->field) {                                      \
            (to)->field = val__;                                        \
        }                                                               \
    } while (0)

/* Pass in a zero'ed @prof */
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        const TCGProfile *orig = &s->prof;

        if (counters) {
            PROF_ADD(prof, orig, tb_count1);
            PROF_ADD(prof, orig, tb_count);
            PROF_ADD(prof, orig, op_count);
            PROF_MAX(prof, orig, op_count_max);
            PROF_ADD(prof, orig, temp_count);
            PROF_MAX(prof, orig, temp_count_max);
            PROF_ADD(prof, orig, del_op_count);
            PROF_ADD(prof, orig, code_in_len);
            PROF_ADD(prof, orig, code_out_len);
            PROF_ADD(prof, orig, search_out_len);
            PROF_ADD(prof, orig, interm_time);
            PROF_ADD(prof, orig, code_time);
            PROF_ADD(prof, orig, la_time);
            PROF_ADD(prof, orig, opt_time);
            PROF_ADD(prof, orig, restore_count);
            PROF_ADD(prof, orig, restore_time);
        }
        if (table) {
            int i;

            for (i = 0; i < NB_OPS; i++) {
                PROF_ADD(prof, orig, table_op_count[i]);
            }
        }
    }
}

#undef PROF_ADD
#undef PROF_MAX

static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}

static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}

void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
                    prof.table_op_count[i]);
    }
}
#else
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &s->prof;
#endif
    int i, num_insns;
    TCGOp *op;

#ifdef CONFIG_PROFILER
    {
        int n = 0;

        QTAILQ_FOREACH(op, &s->ops, link) {
            n++;
        }
        atomic_set(&prof->op_count, prof->op_count + n);
        if (n > prof->op_count_max) {
            atomic_set(&prof->op_count_max, n);
        }

        n = s->nb_temps;
        atomic_set(&prof->temp_count, prof->temp_count + n);
        if (n > prof->temp_count_max) {
            atomic_set(&prof->temp_count_max, n);
        }
    }
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    atomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(tb->pc))) {
            qemu_log_lock();
            qemu_log("OP before indirect lowering:\n");
            tcg_dump_ops(s);
            qemu_log("\n");
            qemu_log_unlock();
        }
#endif
        /* Replace indirect temps with direct temps.  */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness.  */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = tb->tc.ptr;
    s->code_ptr = tb->tc.ptr;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
        case INDEX_op_dupi_vec:
            tcg_reg_alloc_movi(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled
               opcodes.  */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    if (!tcg_out_ldst_finalize(s)) {
        return -1;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    if (!tcg_out_pool_finalize(s)) {
        return -1;
    }
#endif

    /* flush instruction cache */
    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

    return tcg_current_code_size(s);
}
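
/* Note (added for clarity): a non-negative return value is the number of
   host bytes emitted for this TB; -1 reports that code generation overran
   code_gen_highwater or failed to finalize ldst/pool labels, letting the
   caller flush the code buffer and retry the translation.  */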
#ifdef CONFIG_PROFILER
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64
                " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    cpu_fprintf(f, "avg temps/TB        %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    cpu_fprintf(f, "avg host code/TB    %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    cpu_fprintf(f, "avg search data/TB  %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    cpu_fprintf(f, "cycles/op           %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte      %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    cpu_fprintf(f, "cycles/search byte  %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time    %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time  %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, "  avg cycles        %0.1f\n",
                s->restore_count
                ? (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface.  */

static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
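
/* Worked example (added): with strtab == "\0" ".text\0" ".debug_info\0" ...,
   find_string(strtab, ".debug_info") starts past the leading NUL, fails to
   match ".text", advances strlen(".text") + 1 == 6 bytes, and returns
   offset 7 -- exactly the kind of sh_name value patched in below.  */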
static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };
    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities.  */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite.  */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
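
/* Usage note (added): once __jit_debug_register_code() has fired, a
   debugger attached to QEMU that implements the GDB JIT interface picks
   up the in-memory ELF image, so host backtraces through generated code
   resolve to the code_gen_buffer symbol instead of raw addresses.  */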
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier.  */

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();