/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"
/* Define to dump the ELF file used to communicate with GDB.  */

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS

#include "exec/cpu-common.h"
#include "exec/exec-all.h"
#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif
63 #include "sysemu/sysemu.h"
/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrame;

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
#if TCG_TARGET_MAYBE_vec
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                           unsigned vece, const TCGArg *args,
                           const int *const_args);
#else
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                                  unsigned vece, const TCGArg *args,
                                  const int *const_args)
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif
#define TCG_HIGHWATER 1024

static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;
struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n; /* number of regions */
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;
/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
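/*
 * Editor's note (not part of the original source): on a host with 1-byte
 * insn units (e.g. x86, TCG_TARGET_INSN_UNIT_SIZE == 1), tcg_out32() takes
 * the memcpy path and advances code_ptr by 4 / 1 = 4 units, while on a
 * host with 4-byte units it takes the "== 4" branch and stores a single
 * unit. Both branches are resolved at compile time, so each target only
 * ever compiles one of them.
 */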
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}
static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}

static void set_jmp_reset_offset(TCGContext *s, int which)
{
    size_t off = tcg_current_code_size(s);
    s->tb_jmp_reset_offset[which] = off;
    /* Make sure that we didn't overflow the stored offset.  */
    assert(s->tb_jmp_reset_offset[which] == off);
}
#include "tcg-target.inc.c"
/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have the .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
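/*
 * Editor's note, a worked instance of the comparator contract above
 * (hypothetical addresses): after inserting a TB with .ptr = 0x1000 and
 * .size = 0x40, a lookup with key { .ptr = 0x103f, .size = 0 } takes the
 * lookup path, and ptr_cmp_tb_tc() returns 0 because
 * 0x1000 <= 0x103f < 0x1040 -- so g_tree_lookup() finds the TB for any
 * address inside its translated code.
 */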
static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(void *p)
{
    size_t region_idx;

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}
void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}
static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}
size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}
/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        bool err = tcg_region_initial_alloc__locked(s);

        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}
#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
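/*
 * Editor's note, a worked example with hypothetical numbers: with a 256 MB
 * code_gen_buffer and max_cpus == 8, the first iteration already succeeds:
 * 256 MB / (8 * 8) = 4 MB >= 2 MB, so 64 regions are used. With a 64 MB
 * buffer the loop steps down until 64 MB / (8 * 4) = 2 MB fits, yielding
 * 32 regions; only if even one region per thread would be under 2 MB does
 * the fallback of exactly max_cpus regions apply.
 */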
/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    tcg_region_trees_init();

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = atomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < max_cpus);
    atomic_set(&tcg_ctxs[n], s);

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */
/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}
/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
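/*
 * Editor's note, a worked instance of the formula above (hypothetical
 * numbers): with two regions of 8 MB stride and a 4 KB guard page each,
 * "end + guard_size - start" recovers the full 16 MB span, and then
 * 2 * (4 KB + TCG_HIGHWATER) is subtracted because neither the guard page
 * nor the high-water slack at the end of each region is ever filled with
 * translated code.
 */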
size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);

        total += atomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
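/*
 * Editor's note on the allocator above: the fast path is the inline
 * tcg_malloc() in tcg.h, which simply bumps pool_cur and only falls back
 * to tcg_malloc_internal() when the current chunk runs out. Oversized
 * requests get a dedicated pool on the pool_first_large list, which
 * tcg_pool_reset() frees before each translation, while the fixed-size
 * chunks are kept and recycled.
 */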
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;
    s->code_gen_prologue = buf0;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}
static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
*tcg_temp_new_internal(TCGType type
, bool temp_local
)
1203 TCGContext
*s
= tcg_ctx
;
1207 k
= type
+ (temp_local
? TCG_TYPE_COUNT
: 0);
1208 idx
= find_first_bit(s
->free_temps
[k
].l
, TCG_MAX_TEMPS
);
1209 if (idx
< TCG_MAX_TEMPS
) {
1210 /* There is already an available temp with the right type. */
1211 clear_bit(idx
, s
->free_temps
[k
].l
);
1213 ts
= &s
->temps
[idx
];
1214 ts
->temp_allocated
= 1;
1215 tcg_debug_assert(ts
->base_type
== type
);
1216 tcg_debug_assert(ts
->temp_local
== temp_local
);
1218 ts
= tcg_temp_alloc(s
);
1219 if (TCG_TARGET_REG_BITS
== 32 && type
== TCG_TYPE_I64
) {
1220 TCGTemp
*ts2
= tcg_temp_alloc(s
);
1222 ts
->base_type
= type
;
1223 ts
->type
= TCG_TYPE_I32
;
1224 ts
->temp_allocated
= 1;
1225 ts
->temp_local
= temp_local
;
1227 tcg_debug_assert(ts2
== ts
+ 1);
1228 ts2
->base_type
= TCG_TYPE_I64
;
1229 ts2
->type
= TCG_TYPE_I32
;
1230 ts2
->temp_allocated
= 1;
1231 ts2
->temp_local
= temp_local
;
1233 ts
->base_type
= type
;
1235 ts
->temp_allocated
= 1;
1236 ts
->temp_local
= temp_local
;
1240 #if defined(CONFIG_DEBUG_TCG)
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}
/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}
TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupi_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
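/*
 * Editor's note on the sizemask encoding used in tcg_gen_callN() below
 * (derived from dh_sizemask in exec/helper-head.h): bit 0 says the return
 * value is 64-bit and bit 1 that it is signed; for argument i the same
 * pair sits at bits (i+1)*2 and (i+1)*2 + 1, hence the
 * "sizemask & (1 << (i+1)*2)" tests. For example, a helper with a 64-bit
 * result and one 64-bit argument has sizemask = (1 << 0) | (1 << 2) = 5.
 */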
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;
    TCGTemp *ts;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
    }
    for (n = s->nb_temps; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    if (ts->temp_global) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
/* Find helper name.  */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (helper_table) {
        TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};
static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}
static void tcg_dump_ops(TCGContext *s, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += qemu_log("\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            for (; col < 40; ++col) {
                putc(' ', qemu_logfile);
            }
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = op->output_pref[i];

                if (i == 0) {
                    qemu_log("  pref=");
                } else {
                    qemu_log(",");
                }
                if (set == 0) {
                    qemu_log("none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    qemu_log("all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    qemu_log("%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    qemu_log("%#x", (uint32_t)set);
                } else {
                    qemu_log("%#" PRIx64, (uint64_t)set);
                }
            }
        }

        qemu_log("\n");
    }
}
/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;

    int i, n;
    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG))
            return 0;
        n = 0;
        for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i))
                n++;
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for(i = 0; i < n; i++)
        def->sorted_args[start + i] = start + i;
    if (n <= 1)
        return;
    for(i = 0; i < n - 1; i++) {
        for(j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                    break;
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    TCGLabel *label;

    switch (op->opc) {
    case INDEX_op_br:
        label = arg_label(op->args[0]);
        label->refs--;
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        label = arg_label(op->args[3]);
        label->refs--;
        break;
    case INDEX_op_brcond2_i32:
        label = arg_label(op->args[5]);
        label->refs--;
        break;
    default:
        break;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;

#ifdef CONFIG_PROFILER
    atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}
static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op;

    if (likely(QTAILQ_EMPTY(&s->free_ops))) {
        op = tcg_malloc(sizeof(TCGOp));
    } else {
        op = QTAILQ_FIRST(&s->free_ops);
        QTAILQ_REMOVE(&s->free_ops, op, link);
    }
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    s->nb_ops++;

    return op;
}

TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
/* Reachable analysis : remove unreachable code.  */
static void reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;
        int call_flags;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);
            if (label->refs == 0) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that will be, and there is
                 * little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again.  */
                dead = false;
                remove = false;

                /*
                 * Optimization can fold conditional branches to unconditional.
                 * If we find a label with one reference which is preceded by
                 * an unconditional branch to it, remove both.  This needed to
                 * wait until the dead code in between them was removed.
                 */
                if (label->refs == 1) {
                    TCGOp *op_prev = QTAILQ_PREV(op, link);
                    if (op_prev->opc == INDEX_op_br &&
                        label == arg_label(op_prev->args[0])) {
                        tcg_op_remove(s, op_prev);
                        remove = true;
                    }
                }
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead.  */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions.  */
            call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1];
            if (call_flags & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind.  */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}
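
/*
 * Sketch of the single-reference fold above: once the dead ops between
 * an unconditional branch and its target have been deleted, a sequence
 *     br $L1
 *     set_label $L1
 * is a no-op, so both the branch and the now-unreferenced label are
 * removed.  This pattern commonly appears after the optimizer folds a
 * conditional branch to an unconditional one.
 */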
#define TS_DEAD  1
#define TS_MEM   2

#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

/* For liveness_pass_1, the register preferences for a given temp.  */
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
{
    return ts->state_ptr;
}

/* For liveness_pass_1, reset the preferences for a given temp to the
 * maximal regset for its type.
 */
static inline void la_reset_pref(TCGTemp *ts)
{
    *la_temp_pref(ts)
        = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
}

/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void la_func_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void la_bb_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = (s->temps[i].temp_local
                             ? TS_DEAD | TS_MEM
                             : TS_DEAD);
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: sync globals back to memory.  */
static void la_global_sync(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; ++i) {
        int state = s->temps[i].state;
        s->temps[i].state = state | TS_MEM;
        if (state == TS_DEAD) {
            /* If the global was previously dead, reset prefs.  */
            la_reset_pref(&s->temps[i]);
        }
    }
}

/* liveness analysis: sync globals back to memory and kill.  */
static void la_global_kill(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; i++) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: note live globals crossing calls.  */
static void la_cross_call(TCGContext *s, int nt)
{
    TCGRegSet mask = ~tcg_target_call_clobber_regs;
    int i;

    for (i = 0; i < nt; i++) {
        TCGTemp *ts = &s->temps[i];
        if (!(ts->state & TS_DEAD)) {
            TCGRegSet *pset = la_temp_pref(ts);
            TCGRegSet set = *pset;

            set &= mask;
            /* If the combination is not possible, restart.  */
            if (set == 0) {
                set = tcg_target_available_regs[ts->type] & mask;
            }
            *pset = set;
        }
    }
}
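
/*
 * Worked example for la_cross_call(), assuming a hypothetical host
 * where tcg_target_call_clobber_regs = { R0, R1 }: a live temp whose
 * preference set is { R1, R2 } is narrowed to { R2 }, steering it into
 * a call-saved register; a temp preferring only { R0, R1 } restarts
 * from all call-saved registers available for its type.
 */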
/* Liveness analysis : update the opc_arg_life array to tell if a
   given input arguments is dead. Instructions updating dead
   temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB.  */
    la_func_end(s, nb_globals, nb_temps);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;
                int nb_call_regs;

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);
                call_flags = op->args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead.  */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);

                    /* Not used -- it will be tcg_target_call_oarg_regs[i].  */
                    op->output_pref[i] = 0;
                }

                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper.  */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts && ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs.  */
                la_cross_call(s, nb_temps);

                nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);

                /* Input arguments are live for preceding opcodes.  */
                for (i = 0; i < nb_iargs; i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts && ts->state & TS_DEAD) {
                        /* For those arguments that die, and will be allocated
                         * in registers, clear the register set for that arg,
                         * to be filled in below.  For args that will be on
                         * the stack, reset to any available reg.
                         */
                        *la_temp_pref(ts)
                            = (i < nb_call_regs ? 0 :
                               tcg_target_available_regs[ts->type]);
                        ts->state &= ~TS_DEAD;
                    }
                }

                /* For each input argument, add its input register to prefs.
                   If a temp is used once, this produces a single set bit.  */
                for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts) {
                        tcg_regset_set_reg(*la_temp_pref(ts),
                                           tcg_target_call_iarg_regs[i]);
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit. */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live.  */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead.  */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live.  */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed.  */
                op->output_pref[i] = *la_temp_pref(ts);

                /* Output args are dead.  */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update.  */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type.  */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand.  */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward.  */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->u.regs;
                    if (ct->ct & TCG_CT_IALIAS) {
                        set &= op->output_pref[ct->alias_index];
                    }
                    /* If the combination is not possible, restart.  */
                    if (set == 0) {
                        set = ct->u.regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}
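
/*
 * The arg_life word computed above carries per-operand flags:
 * DEAD_ARG << i marks op->args[i] as dying at this op, and
 * SYNC_ARG << i marks an output that must also be flushed to its
 * canonical memory slot.  The register allocator below consumes these
 * through IS_DEAD_ARG() and NEED_SYNC_ARG().
 */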
/* Liveness analysis: Convert indirect regs to direct temporaries.  */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global.  */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead.  */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = op->args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts && arg_ts->state == TS_DEAD) {
                    TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc);

                    lop->args[0] = temp_arg(dir_ts);
                    lop->args[1] = temp_arg(arg_ts->mem_base);
                    lop->args[2] = arg_ts->mem_offset;

                    /* Loaded, but synced with memory.  */
                    arg_ts->state = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts) {
                    op->args[i] = temp_arg(dir_ts);
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        arg_ts->state = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available.  */
        for (i = 0; i < nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (!dir_ts) {
                continue;
            }
            op->args[i] = temp_arg(dir_ts);
            changes = true;

            /* The output is now live and modified.  */
            arg_ts->state = 0;

            /* Sync outputs upon their last write.  */
            if (NEED_SYNC_ARG(i)) {
                TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_st_i32
                                  : INDEX_op_st_i64);
                TCGOp *sop = tcg_op_insert_after(s, op, sopc);

                sop->args[0] = temp_arg(dir_ts);
                sop->args[1] = temp_arg(arg_ts->mem_base);
                sop->args[2] = arg_ts->mem_offset;

                arg_ts->state = TS_MEM;
            }
            /* Drop outputs that are dead.  */
            if (IS_DEAD_ARG(i)) {
                arg_ts->state = TS_DEAD;
            }
        }
    }

    return changes;
}
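
/*
 * Sketch of the lowering above for one indirect global G that lives in
 * memory at [base + off], with G' the direct temp allocated at the top
 * of the pass:
 *
 *     before:  add_i32 G, G, t0
 *     after:   ld_i32  G', base, off    (inserted: G was TS_DEAD)
 *              add_i32 G', G', t0       (args rewritten to G')
 *              st_i32  G', base, off    (inserted when NEED_SYNC_ARG(0))
 */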
#ifdef CONFIG_DEBUG_TCG
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for(i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        switch(ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}

static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}

static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead.  */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (ts->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ts->reg] = NULL;
    }
    ts->val_type = (free_or_dead < 0
                    || ts->temp_local
                    || ts->temp_global
                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
}

/* Mark a temporary as dead.  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}

/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   registers needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly.  */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}

/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, 0, -1);
    }
}
/**
 * tcg_reg_alloc:
 * @required_regs: Set of registers in which we must allocate.
 * @allocated_regs: Set of registers which must be avoided.
 * @preferred_regs: Set of registers we should prefer.
 * @rev: True if we search the registers in "indirect" order.
 *
 * The allocated register must be in @required_regs & ~@allocated_regs,
 * but if we can put it in @preferred_regs we may save a move later.
 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
                            TCGRegSet allocated_regs,
                            TCGRegSet preferred_regs, bool rev)
{
    int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    reg_ct[1] = required_regs & ~allocated_regs;
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    /* Skip the preferred_regs option if it cannot be satisfied,
       or if the preference made no difference.  */
    f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* Try free registers, preferences first.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            if (s->reg_to_temp[reg] == NULL) {
                return reg;
            }
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (s->reg_to_temp[reg] == NULL &&
                    tcg_regset_test_reg(set, reg)) {
                    return reg;
                }
            }
        }
    }

    /* We must spill something.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (tcg_regset_test_reg(set, reg)) {
                    tcg_reg_free(s, reg, allocated_regs);
                    return reg;
                }
            }
        }
    }

    tcg_abort();
}
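
/*
 * Informally, the search order above: first only free registers are
 * considered (preferred subset first, then the whole constraint set),
 * and only if that fails is a register spilled.  E.g. with
 * required = { R1, R2, R3 } and preferred = { R3 }: a free R3 wins;
 * if R3 is busy, the first free register in the target's allocation
 * order wins; if all three are busy, one is freed with tcg_reg_free(),
 * trying the preferred R3 first.
 */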
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_movi(s, ts->type, reg, ts->val);
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory. Keep an tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
}

/* save globals to their canonical location and assume they can be
   modified be the following code. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}

/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->fixed_reg
                         || ts->mem_coherent);
    }
}

/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->temp_local) {
            temp_save(s, ts, allocated_regs);
        } else {
            /* The liveness analysis already ensures that temps are dead.
               Keep an tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
        }
    }

    save_globals(s, allocated_regs);
}
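
/*
 * Note that temp_save() and sync_globals() no longer emit any code:
 * liveness_pass_1 already forces globals to be synced or dead at basic
 * block boundaries and around side-effecting ops, so the asserts above
 * are pure consistency checks on that invariant.
 */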
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    if (ots->fixed_reg) {
        /* For fixed registers, we do not do any constant propagation.  */
        tcg_out_movi(s, ots->type, ots->reg, val);
        return;
    }

    /* The movi is not explicitly generated here.  */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}

static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
{
    TCGTemp *ots = arg_temp(op->args[0]);
    tcg_target_ulong val = op->args[1];

    tcg_reg_alloc_do_movi(s, ots, val, op->life, op->output_pref[0]);
}
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs, preferred_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    allocated_regs = s->reserved_regs;
    preferred_regs = op->output_pref[0];
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype],
                  allocated_regs, preferred_regs);
    }

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else {
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, preferred_regs,
                                         ots->indirect_base);
            }
            tcg_out_mov(s, otype, ots->reg, ts->reg);
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0, 0);
        }
    }
}
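
/*
 * The interesting case above is a dying source: when IS_DEAD_ARG(1)
 * and neither temp sits in a fixed register, no host instruction is
 * emitted at all -- the destination simply takes over the source's
 * register and the source is marked dead, i.e. a rename instead of a
 * copy.
 */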
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, o_preferred_regs;

        i = def->sorted_args[nb_oargs + k];
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        i_preferred_regs = o_preferred_regs = 0;
        if (arg_ct->ct & TCG_CT_IALIAS) {
            o_preferred_regs = op->output_pref[arg_ct->alias_index];
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != op->args[arg_ct->alias_index]) {
                    goto allocate_in_reg;
                }
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }

                /* check if the current register has already been allocated
                   for another input aliased to an output */
                if (ts->val_type == TEMP_VAL_REG) {
                    int k2, i2;
                    reg = ts->reg;
                    for (k2 = 0 ; k2 < k ; k2++) {
                        i2 = def->sorted_args[nb_oargs + k2];
                        if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
                            reg == new_args[i2]) {
                            goto allocate_in_reg;
                        }
                    }
                }
                i_preferred_regs = o_preferred_regs;
            }
        }

        temp_load(s, ts, arg_ct->u.regs, i_allocated_regs, i_preferred_regs);
        reg = ts->reg;

        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      i_allocated_regs, 0);
            reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
                                o_preferred_regs, ts->indirect_base);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for(k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);
            if ((arg_ct->ct & TCG_CT_ALIAS)
                && !const_args[arg_ct->alias_index]) {
                reg = new_args[arg_ct->alias_index];
            } else if (arg_ct->ct & TCG_CT_NEWREG) {
                reg = tcg_reg_alloc(s, arg_ct->u.regs,
                                    i_allocated_regs | o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            } else {
                /* if fixed register, we try to use it */
                reg = ts->reg;
                if (ts->fixed_reg &&
                    tcg_regset_test_reg(arg_ct->u.regs, reg)) {
                    goto oarg_end;
                }
                reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            /* if a fixed register is used, then a move will be done afterwards */
            if (!ts->fixed_reg) {
                if (ts->val_type == TEMP_VAL_REG) {
                    s->reg_to_temp[ts->reg] = NULL;
                }
                ts->val_type = TEMP_VAL_REG;
                ts->reg = reg;
                /* temp value is modified, so the value kept in memory is
                   potentially not the same */
                ts->mem_coherent = 0;
                s->reg_to_temp[reg] = ts;
            }
        oarg_end:
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);
        reg = new_args[i];
        if (ts->fixed_reg && ts->reg != reg) {
            tcg_out_mov(s, ts->type, ts->reg, reg);
        }
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
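
/*
 * Output allocation above, by constraint kind: a digit alias reuses
 * the register already chosen for the matching input; TCG_CT_NEWREG
 * (the '&' constraint, an early clobber) must avoid both
 * i_allocated_regs and o_allocated_regs so the result cannot land on
 * any input; anything else takes a register satisfying the constraint,
 * keeping fixed-reg temps in place when possible.
 */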
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif

static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
    flags = op->args[nb_oargs + nb_iargs + 1];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = op->args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs, 0);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    allocated_regs = s->reserved_regs;
    for (i = 0; i < nb_regs; i++) {
        arg = op->args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            reg = tcg_target_call_iarg_regs[i];

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_reg_free(s, reg, allocated_regs);
                    tcg_out_mov(s, ts->type, reg, ts->reg);
                }
            } else {
                TCGRegSet arg_set = 0;

                tcg_reg_free(s, reg, allocated_regs);
                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs, 0);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for(i = 0; i < nb_oargs; i++) {
        arg = op->args[i];
        ts = arg_temp(arg);
        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);

        if (ts->fixed_reg) {
            if (ts->reg != reg) {
                tcg_out_mov(s, ts->type, ts->reg, reg);
            }
        } else {
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            if (NEED_SYNC_ARG(i)) {
                temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
            } else if (IS_DEAD_ARG(i)) {
                temp_dead(s, ts);
            }
        }
    }
}
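
/*
 * Argument marshalling above follows the host ABI as described by the
 * backend: arguments beyond ARRAY_SIZE(tcg_target_call_iarg_regs) are
 * stored into the pre-reserved TCG_STATIC_CALL_ARGS_SIZE stack area,
 * the rest are forced into the designated argument registers, and all
 * call-clobbered registers are freed before tcg_out_call().
 */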
#ifdef CONFIG_PROFILER

/* avoid copy/paste errors */
#define PROF_ADD(to, from, field)                       \
    do {                                                \
        (to)->field += atomic_read(&((from)->field));   \
    } while (0)

#define PROF_MAX(to, from, field)                                       \
    do {                                                                \
        typeof((from)->field) val__ = atomic_read(&((from)->field));    \
        if (val__ > (to)->field) {                                      \
            (to)->field = val__;                                        \
        }                                                               \
    } while (0)

/* Pass in a zero'ed @prof */
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        const TCGProfile *orig = &s->prof;

        if (counters) {
            PROF_ADD(prof, orig, cpu_exec_time);
            PROF_ADD(prof, orig, tb_count1);
            PROF_ADD(prof, orig, tb_count);
            PROF_ADD(prof, orig, op_count);
            PROF_MAX(prof, orig, op_count_max);
            PROF_ADD(prof, orig, temp_count);
            PROF_MAX(prof, orig, temp_count_max);
            PROF_ADD(prof, orig, del_op_count);
            PROF_ADD(prof, orig, code_in_len);
            PROF_ADD(prof, orig, code_out_len);
            PROF_ADD(prof, orig, search_out_len);
            PROF_ADD(prof, orig, interm_time);
            PROF_ADD(prof, orig, code_time);
            PROF_ADD(prof, orig, la_time);
            PROF_ADD(prof, orig, opt_time);
            PROF_ADD(prof, orig, restore_count);
            PROF_ADD(prof, orig, restore_time);
        }
        if (table) {
            int i;

            for (i = 0; i < NB_OPS; i++) {
                PROF_ADD(prof, orig, table_op_count[i]);
            }
        }
    }
}

#undef PROF_ADD
#undef PROF_MAX

static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}

static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}

void tcg_dump_op_count(void)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name,
                    prof.table_op_count[i]);
    }
}

int64_t tcg_cpu_exec_time(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    int64_t ret = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        const TCGProfile *prof = &s->prof;

        ret += atomic_read(&prof->cpu_exec_time);
    }
    return ret;
}
#else
void tcg_dump_op_count(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}

int64_t tcg_cpu_exec_time(void)
{
    error_report("%s: TCG profiler not compiled", __func__);
    exit(EXIT_FAILURE);
}
#endif
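
/*
 * Usage note for the profiler block above: each vCPU has its own
 * TCGContext, so e.g. tcg_profile_snapshot_counters(&prof) folds the
 * per-context counters into one TCGProfile before reporting; the
 * do/while(0) wrappers keep PROF_ADD/PROF_MAX statement-like under
 * if/else.
 */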
int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &s->prof;
#endif
    int i, num_insns;
    TCGOp *op;

#ifdef CONFIG_PROFILER
    {
        int n = 0;

        QTAILQ_FOREACH(op, &s->ops, link) {
            n++;
        }
        atomic_set(&prof->op_count, prof->op_count + n);
        if (n > prof->op_count_max) {
            atomic_set(&prof->op_count_max, n);
        }

        n = s->nb_temps;
        atomic_set(&prof->temp_count, prof->temp_count + n);
        if (n > prof->temp_count_max) {
            atomic_set(&prof->temp_count_max, n);
        }
    }
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP:\n");
        tcg_dump_ops(s, false);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted.  */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && l->refs) {
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
                error = true;
            }
        }
        assert(!error);
    }
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    atomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    reachable_code_pass(s);
    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(tb->pc))) {
            qemu_log_lock();
            qemu_log("OP before indirect lowering:\n");
            tcg_dump_ops(s, false);
            qemu_log("\n");
            qemu_log_unlock();
        }
#endif
        /* Replace indirect temps with direct temps.  */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness.  */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s, true);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = tb->tc.ptr;
    s->code_ptr = tb->tc.ptr;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
        case INDEX_op_dupi_vec:
            tcg_reg_alloc_movi(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset.  */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off.  */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

    /* flush instruction cache */
    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

    return tcg_current_code_size(s);
}
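
/*
 * On the failure paths above: -1 means the code buffer high-water mark
 * was crossed and -2 means a per-insn offset no longer fits in the
 * uint16_t search data (or relocations could not be resolved).  Either
 * way the caller in accel/tcg is expected to discard this TB and
 * retry, typically after flushing the code buffer.
 */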
#ifdef CONFIG_PROFILER
void tcg_dump_info(void)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    qemu_printf("JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    qemu_printf("translated TBs      %" PRId64 " (aborted=%" PRId64
                " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    qemu_printf("avg ops/TB          %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    qemu_printf("deleted ops/TB      %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    qemu_printf("avg temps/TB        %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    qemu_printf("avg host code/TB    %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    qemu_printf("avg search data/TB  %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    qemu_printf("cycles/op           %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    qemu_printf("cycles/in byte      %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    qemu_printf("cycles/out byte     %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    qemu_printf("cycles/search byte     %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    qemu_printf("  gen_interm time   %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    qemu_printf("  gen_code time     %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    qemu_printf("optim./code time    %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    qemu_printf("liveness/code time  %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    qemu_printf("cpu_restore count   %" PRId64 "\n",
                s->restore_count);
    qemu_printf("  avg cycles        %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}
#endif
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface.  */

static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}

static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities.  */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite.  */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier.  */

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();