/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB.  */

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions.  Currently they are used for qemu_ld/st
   instructions. */
#define NO_CPU_IO_DEFS

#include "exec/exec-all.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "sysemu/sysemu.h"
/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
                             TCGReg dst, tcg_target_long arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                           unsigned vece, const TCGArg *args,
                           const int *const_args);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type,
                                    TCGReg dst, tcg_target_long arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                                  unsigned vece, const TCGArg *args,
                                  const int *const_args)
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

#define TCG_HIGHWATER 1024
static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;

struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;
/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
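
/*
 * A note on the emitters above (illustrative, not from the original
 * sources): s->code_ptr counts in tcg_insn_unit, not in bytes.  On a host
 * whose insn unit is 4 bytes, for example,
 *
 *     tcg_out64(s, 0x1122334455667788ull);
 *
 * memcpy's the 8-byte value and then advances the pointer by
 * 8 / TCG_TARGET_INSN_UNIT_SIZE == 2 units.
 */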
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}
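
/*
 * Usage sketch (illustrative, assuming the usual tcg-op.h helpers):
 * translators pair gen_new_label() with gen_set_label(); branches to a
 * label that has not been emitted yet are recorded via tcg_out_reloc()
 * and fixed up once tcg_out_label() supplies the address:
 *
 *     TCGLabel *over = gen_new_label();
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, val, 0, over);
 *     ...ops skipped when val == 0...
 *     gen_set_label(over);
 */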
static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}

static void set_jmp_reset_offset(TCGContext *s, int which)
{
    size_t off = tcg_current_code_size(s);
    s->tb_jmp_reset_offset[which] = off;
    /* Make sure that we didn't overflow the stored offset.  */
    assert(s->tb_jmp_reset_offset[which] == off);
}

#include "tcg-target.inc.c"
/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have their .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}
static struct tcg_region_tree *tc_ptr_to_region_tree(void *p)
{
    size_t region_idx;

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}
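
/*
 * Worked example (illustrative values): with region.start_aligned ==
 * 0x7f0000000000 and region.stride == 0x200000 (2 MB), a code pointer at
 * 0x7f0000250000 gives offset == 0x250000, so region_idx ==
 * 0x250000 / 0x200000 == 1 and the pointer maps to the second region's
 * tree.
 */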
void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}
static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}
size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}
static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}
/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}
/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        bool err = tcg_region_initial_alloc__locked(s);

        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}
#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
#if !defined(CONFIG_USER_ONLY)
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
#endif
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    tcg_region_trees_init();

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}
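
/*
 * Worked example (illustrative values): a page-aligned 32 MB
 * code_gen_buffer split into 8 regions with 4 KB pages gives
 * region.stride == 4 MB per region, of which region.size ==
 * 4 MB - 4 KB is usable code space; the final page of each region is
 * the mprotect'ed guard page set in the loop above.
 */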
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = atomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < ms->smp.max_cpus);
    atomic_set(&tcg_ctxs[n], s);

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */
/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}
/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);

        total += atomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
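
/*
 * Usage sketch (illustrative): pool allocations need no matching free.
 * Everything handed out through tcg_malloc() (the inline wrapper around
 * tcg_malloc_internal() in tcg.h) lives until the pool is reset at the
 * start of the next translation:
 *
 *     TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
 *     ...
 *     tcg_pool_reset(s);    // e.g. from tcg_func_start()
 */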
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }
    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);
    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;
    s->code_gen_prologue = buf0;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(s->code_gen_buffer, total_size);
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}
static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}
*tcg_global_reg_new_internal(TCGContext
*s
, TCGType type
,
1149 TCGReg reg
, const char *name
)
1153 if (TCG_TARGET_REG_BITS
== 32 && type
!= TCG_TYPE_I32
) {
1157 ts
= tcg_global_alloc(s
);
1158 ts
->base_type
= type
;
1163 tcg_regset_set_reg(s
->reserved_regs
, reg
);
1168 void tcg_set_frame(TCGContext
*s
, TCGReg reg
, intptr_t start
, intptr_t size
)
1170 s
->frame_start
= start
;
1171 s
->frame_end
= start
+ size
;
1173 = tcg_global_reg_new_internal(s
, TCG_TYPE_PTR
, reg
, "_frame");
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
*tcg_temp_new_internal(TCGType type
, bool temp_local
)
1234 TCGContext
*s
= tcg_ctx
;
1238 k
= type
+ (temp_local
? TCG_TYPE_COUNT
: 0);
1239 idx
= find_first_bit(s
->free_temps
[k
].l
, TCG_MAX_TEMPS
);
1240 if (idx
< TCG_MAX_TEMPS
) {
1241 /* There is already an available temp with the right type. */
1242 clear_bit(idx
, s
->free_temps
[k
].l
);
1244 ts
= &s
->temps
[idx
];
1245 ts
->temp_allocated
= 1;
1246 tcg_debug_assert(ts
->base_type
== type
);
1247 tcg_debug_assert(ts
->temp_local
== temp_local
);
1249 ts
= tcg_temp_alloc(s
);
1250 if (TCG_TARGET_REG_BITS
== 32 && type
== TCG_TYPE_I64
) {
1251 TCGTemp
*ts2
= tcg_temp_alloc(s
);
1253 ts
->base_type
= type
;
1254 ts
->type
= TCG_TYPE_I32
;
1255 ts
->temp_allocated
= 1;
1256 ts
->temp_local
= temp_local
;
1258 tcg_debug_assert(ts2
== ts
+ 1);
1259 ts2
->base_type
= TCG_TYPE_I64
;
1260 ts2
->type
= TCG_TYPE_I32
;
1261 ts2
->temp_allocated
= 1;
1262 ts2
->temp_local
= temp_local
;
1264 ts
->base_type
= type
;
1266 ts
->temp_allocated
= 1;
1267 ts
->temp_local
= temp_local
;
1271 #if defined(CONFIG_DEBUG_TCG)
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}
TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupi_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
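
/*
 * Usage sketch (illustrative): generic expansion code can query
 * tcg_op_supported() before emitting an opcode and fall back to an
 * open-coded sequence otherwise:
 *
 *     if (tcg_op_supported(INDEX_op_ctpop_i32)) {
 *         ...emit ctpop directly...
 *     } else {
 *         ...expand population count by other means...
 *     }
 */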
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;
    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
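
/*
 * Note on the sizemask encoding used throughout tcg_gen_callN() (as read
 * from the code above, not a separate spec): bit 0 marks a 64-bit return
 * value, and for argument i the bit at (i+1)*2 marks "argument is 64-bit"
 * while (i+1)*2 + 1 marks "argument is signed".  Hence the recurring
 * tests (sizemask & 1) and (sizemask & (1 << (i+1)*2)).
 */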
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;
    TCGTemp *ts;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
    }
    for (n = s->nb_temps; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    if (ts->temp_global) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
/* Find helper name.  */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (helper_table) {
        TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}
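
/*
 * A note on the bit tricks above (illustrative): clearing the lowest set
 * bit of a single-register set leaves zero, hence (d & (d - 1)) == 0,
 * and ctz32/ctz64 then recover that register's index.  An empty set also
 * passes tcg_regset_single(); callers only apply it to non-empty sets.
 */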
static void tcg_dump_ops(TCGContext *s, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += qemu_log("\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
            case INDEX_op_cmpsel_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            for (; col < 40; ++col) {
                putc(' ', qemu_logfile);
            }
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = op->output_pref[i];

                if (i == 0) {
                    qemu_log("  pref=");
                } else {
                    qemu_log(",");
                }
                if (set == 0) {
                    qemu_log("none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    qemu_log("all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    qemu_log("%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    qemu_log("%#x", (uint32_t)set);
                } else {
                    qemu_log("%#" PRIx64, (uint64_t)set);
                }
            }
        }

        qemu_log("\n");
    }
}
/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;

    int i, n;
    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG))
            return 0;
        n = 0;
        for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i))
                n++;
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for(i = 0; i < n; i++)
        def->sorted_args[start + i] = start + i;
    if (n <= 1)
        return;
    for(i = 0; i < n - 1; i++) {
        for(j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
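
/*
 * Example of the constraint syntax handled above (illustrative,
 * hypothetical entry): a target's TCGTargetOpDef for add_i32 such as
 * { "r", "r", "ri" } requests a register output, a register input, and
 * an input that may be a register or an immediate ('i'); a digit like
 * "0" aliases an argument to output 0, and '&' marks a new-register
 * output that must not overlap the inputs.
 */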
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    TCGLabel *label;

    switch (op->opc) {
    case INDEX_op_br:
        label = arg_label(op->args[0]);
        label->refs--;
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        label = arg_label(op->args[3]);
        label->refs--;
        break;
    case INDEX_op_brcond2_i32:
        label = arg_label(op->args[5]);
        label->refs--;
        break;
    default:
        break;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;

#ifdef CONFIG_PROFILER
    atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}
static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op;

    if (likely(QTAILQ_EMPTY(&s->free_ops))) {
        op = tcg_malloc(sizeof(TCGOp));
    } else {
        op = QTAILQ_FIRST(&s->free_ops);
        QTAILQ_REMOVE(&s->free_ops, op, link);
    }
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    s->nb_ops++;

    return op;
}
TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
/* Reachable analysis: remove unreachable code.  */
static void reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;
        int call_flags;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);
            if (label->refs == 0) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that will be, and there is
                 * little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again.  */
                dead = false;
                remove = false;

                /*
                 * Optimization can fold conditional branches to unconditional.
                 * If we find a label with one reference which is preceded by
                 * an unconditional branch to it, remove both.  This needed to
                 * wait until the dead code in between them was removed.
                 */
                if (label->refs == 1) {
                    TCGOp *op_prev = QTAILQ_PREV(op, link);
                    if (op_prev->opc == INDEX_op_br &&
                        label == arg_label(op_prev->args[0])) {
                        tcg_op_remove(s, op_prev);
                        remove = true;
                    }
                }
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead.  */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions.  */
            call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1];
            if (call_flags & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind.  */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}
#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

/* For liveness_pass_1, the register preferences for a given temp.  */
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
{
    return ts->state_ptr;
}

/* For liveness_pass_1, reset the preferences for a given temp to the
 * maximal regset for its type.
 */
static inline void la_reset_pref(TCGTemp *ts)
{
    *la_temp_pref(ts)
        = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
}
/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void la_func_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void la_bb_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = (s->temps[i].temp_local
                             ? TS_DEAD | TS_MEM
                             : TS_DEAD);
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: sync globals back to memory.  */
static void la_global_sync(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; ++i) {
        int state = s->temps[i].state;
        s->temps[i].state = state | TS_MEM;
        if (state == TS_DEAD) {
            /* If the global was previously dead, reset prefs.  */
            la_reset_pref(&s->temps[i]);
        }
    }
}
/* liveness analysis: sync globals back to memory and kill.  */
static void la_global_kill(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; i++) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
}
/* liveness analysis: note live globals crossing calls.  */
static void la_cross_call(TCGContext *s, int nt)
{
    TCGRegSet mask = ~tcg_target_call_clobber_regs;
    int i;

    for (i = 0; i < nt; i++) {
        TCGTemp *ts = &s->temps[i];
        if (!(ts->state & TS_DEAD)) {
            TCGRegSet *pset = la_temp_pref(ts);
            TCGRegSet set = *pset;

            set &= mask;
            /* If the combination is not possible, restart.  */
            if (set == 0) {
                set = tcg_target_available_regs[ts->type] & mask;
            }
            *pset = set;
        }
    }
}
/* Liveness analysis: update the opc_arg_life array to tell if a
   given input argument is dead.  Instructions updating dead
   temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB.  */
    la_func_end(s, nb_globals, nb_temps);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;
                int nb_call_regs;

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);
                call_flags = op->args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead.  */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);

                    /* Not used -- it will be tcg_target_call_oarg_regs[i].  */
                    op->output_pref[i] = 0;
                }

                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper.  */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts && ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs.  */
                la_cross_call(s, nb_temps);

                nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);

                /* Input arguments are live for preceding opcodes.  */
                for (i = 0; i < nb_iargs; i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts && ts->state & TS_DEAD) {
                        /* For those arguments that die, and will be allocated
                         * in registers, clear the register set for that arg,
                         * to be filled in below.  For args that will be on
                         * the stack, reset to any available reg.
                         */
                        *la_temp_pref(ts)
                            = (i < nb_call_regs ? 0 :
                               tcg_target_available_regs[ts->type]);
                        ts->state &= ~TS_DEAD;
                    }
                }

                /* For each input argument, add its input register to prefs.
                   If a temp is used once, this produces a single set bit.  */
                for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts) {
                        tcg_regset_set_reg(*la_temp_pref(ts),
                                           tcg_target_call_iarg_regs[i]);
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit.  */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end.  */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live.  */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead.  */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live.  */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed.  */
                op->output_pref[i] = *la_temp_pref(ts);

                /* Output args are dead.  */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update.  */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type.  */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand.  */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward.  */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->u.regs;
                    if (ct->ct & TCG_CT_IALIAS) {
                        set &= op->output_pref[ct->alias_index];
                    }
                    /* If the combination is not possible, restart.  */
                    if (set == 0) {
                        set = ct->u.regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}
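/*
 * Example of the add2/sub2 narrowing above (hypothetical IR): when only
 * the low half of
 *
 *     add2_i32 lo, hi, al, ah, bl, bh
 *
 * is used afterwards, the op is rewritten in place to
 *
 *     add_i32 lo, al, bl
 *
 * with nb_oargs/nb_iargs reduced to 1/2 and the trailing args ignored.
 */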
/* Liveness analysis: Convert indirect regs to direct temporaries.  */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global.  */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead.  */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = op->args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts && arg_ts->state == TS_DEAD) {
                    TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc);

                    lop->args[0] = temp_arg(dir_ts);
                    lop->args[1] = temp_arg(arg_ts->mem_base);
                    lop->args[2] = arg_ts->mem_offset;

                    /* Loaded, but synced with memory.  */
                    arg_ts->state = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts) {
                    op->args[i] = temp_arg(dir_ts);
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        arg_ts->state = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available.  */
        for (i = 0; i < nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (!dir_ts) {
                continue;
            }
            op->args[i] = temp_arg(dir_ts);
            changes = true;

            /* The output is now live and modified.  */
            arg_ts->state = 0;

            /* Sync outputs upon their last write.  */
            if (NEED_SYNC_ARG(i)) {
                TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_st_i32
                                  : INDEX_op_st_i64);
                TCGOp *sop = tcg_op_insert_after(s, op, sopc);

                sop->args[0] = temp_arg(dir_ts);
                sop->args[1] = temp_arg(arg_ts->mem_base);
                sop->args[2] = arg_ts->mem_offset;

                arg_ts->state = TS_MEM;
            }
            /* Drop outputs that are dead.  */
            if (IS_DEAD_ARG(i)) {
                arg_ts->state = TS_DEAD;
            }
        }
    }

    return changes;
}
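/*
 * Effect of this pass, sketched on a hypothetical indirect guest global
 * G stored behind env (names assumed): a use/def sequence
 *
 *     add_i32 G, G, t0
 *
 * becomes
 *
 *     ld_i32  G2, env, off_G     (inserted before; the input was dead)
 *     add_i32 G2, G2, t0
 *     st_i32  G2, env, off_G     (inserted after, on the last write)
 *
 * where G2 is the direct temporary allocated at the top of the pass.
 */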
#ifdef CONFIG_DEBUG_TCG
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for(i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        switch(ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}
static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}
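/*
 * The rounding above is the usual align-up idiom: with an 8-byte
 * tcg_target_long, an offset of 20 becomes (20 + 7) & ~7 = 24.
 * (Illustrative arithmetic only.)
 */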
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead.  */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (ts->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ts->reg] = NULL;
    }
    ts->val_type = (free_or_dead < 0
                    || ts->temp_local
                    || ts->temp_global
                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
}

/* Mark a temporary as dead.  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly.  */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, 0, -1);
    }
}
/**
 * tcg_reg_alloc:
 * @required_regs: Set of registers in which we must allocate.
 * @allocated_regs: Set of registers which must be avoided.
 * @preferred_regs: Set of registers we should prefer.
 * @rev: True if we search the registers in "indirect" order.
 *
 * The allocated register must be in @required_regs & ~@allocated_regs,
 * but if we can put it in @preferred_regs we may save a move later.
 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
                            TCGRegSet allocated_regs,
                            TCGRegSet preferred_regs, bool rev)
{
    int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    reg_ct[1] = required_regs & ~allocated_regs;
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    /* Skip the preferred_regs option if it cannot be satisfied,
       or if the preference made no difference.  */
    f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* Try free registers, preferences first.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            if (s->reg_to_temp[reg] == NULL) {
                return reg;
            }
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (s->reg_to_temp[reg] == NULL &&
                    tcg_regset_test_reg(set, reg)) {
                    return reg;
                }
            }
        }
    }

    /* We must spill something.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (tcg_regset_test_reg(set, reg)) {
                    tcg_reg_free(s, reg, allocated_regs);
                    return reg;
                }
            }
        }
    }

    tcg_abort();
}
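/*
 * Allocation order, summarized (illustrative): pass 1 scans reg_ct[0]
 * (preferred & required & unallocated) and then reg_ct[1] (required &
 * unallocated) for a register with no current temp; only if both fail
 * does pass 2 repeat the scan and spill the first eligible victim via
 * tcg_reg_free.  Setting f to 1 skips the preference pass when it
 * cannot help.
 */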
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_movi(s, ts->type, reg, ts->val);
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory. Keep a tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
}
/* save globals to their canonical location and assume they can be
   modified by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}
/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->fixed_reg
                         || ts->mem_coherent);
    }
}
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->temp_local) {
            temp_save(s, ts, allocated_regs);
        } else {
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
        }
    }

    save_globals(s, allocated_regs);
}
/*
 * Specialized code generation for INDEX_op_movi_*.
 */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    /* ENV should not be modified.  */
    tcg_debug_assert(!ots->fixed_reg);

    /* The movi is not explicitly generated here.  */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}

static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
{
    TCGTemp *ots = arg_temp(op->args[0]);
    tcg_target_ulong val = op->args[1];

    tcg_reg_alloc_do_movi(s, ots, val, op->life, op->output_pref[0]);
}
/*
 * Specialized code generation for INDEX_op_mov_*.
 */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs, preferred_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    allocated_regs = s->reserved_regs;
    preferred_regs = op->output_pref[0];
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!ots->fixed_reg);

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype],
                  allocated_regs, preferred_regs);
    }

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (IS_DEAD_ARG(0)) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else {
        if (IS_DEAD_ARG(1) && !ts->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, preferred_regs,
                                         ots->indirect_base);
            }
            if (!tcg_out_mov(s, otype, ots->reg, ts->reg)) {
                /*
                 * Cross register class move not supported.
                 * Store the source register into the destination slot
                 * and leave the destination temp as TEMP_VAL_MEM.
                 */
                assert(!ots->fixed_reg);
                if (!ots->mem_allocated) {
                    temp_allocate_frame(s, ots);
                }
                tcg_out_st(s, ts->type, ts->reg,
                           ots->mem_base->reg, ots->mem_offset);
                ots->mem_coherent = 1;
                temp_free_or_dead(s, ots, -1);
                return;
            }
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0, 0);
        }
    }
}
/*
 * Specialized code generation for INDEX_op_dup_vec.
 */
static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet dup_out_regs, dup_in_regs;
    TCGTemp *its, *ots;
    TCGType itype, vtype;
    intptr_t endian_fixup;
    unsigned vece;
    bool ok;

    ots = arg_temp(op->args[0]);
    its = arg_temp(op->args[1]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!ots->fixed_reg);

    itype = its->type;
    vece = TCGOP_VECE(op);
    vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    if (its->val_type == TEMP_VAL_CONST) {
        /* Propagate constant via movi -> dupi.  */
        tcg_target_ulong val = its->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, its);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, op->output_pref[0]);
        return;
    }

    dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].u.regs;
    dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].u.regs;

    /* Allocate the output register now.  */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;

        if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
            /* Make sure to not spill the input register. */
            tcg_regset_set_reg(allocated_regs, its->reg);
        }
        ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                                 op->output_pref[0], ots->indirect_base);
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
    }

    switch (its->val_type) {
    case TEMP_VAL_REG:
        /*
         * The dup constraints must be broad, covering all possible VECE.
         * However, tcg_op_dup_vec() gets to see the VECE and we allow it
         * to fail, indicating that extra moves are required for that case.
         */
        if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
            if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
                goto done;
            }
            /* Try again from memory or a vector input register.  */
        }
        if (!its->mem_coherent) {
            /*
             * The input register is not synced, and so an extra store
             * would be required to use memory.  Attempt an integer-vector
             * register move first.  We do not have a TCGRegSet for this.
             */
            if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
                break;
            }
            /* Sync the temp back to its slot and load from there.  */
            temp_sync(s, its, s->reserved_regs, 0, 0);
        }
        /* fall through */

    case TEMP_VAL_MEM:
#ifdef HOST_WORDS_BIGENDIAN
        endian_fixup = itype == TCG_TYPE_I32 ? 4 : 8;
        endian_fixup -= 1 << vece;
#else
        endian_fixup = 0;
#endif
        if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
                             its->mem_offset + endian_fixup)) {
            goto done;
        }
        tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
        break;

    default:
        g_assert_not_reached();
    }

    /* We now have a vector input register, so dup must succeed. */
    ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
    tcg_debug_assert(ok);

 done:
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, its);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, 0);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, o_preferred_regs;

        i = def->sorted_args[nb_oargs + k];
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        i_preferred_regs = o_preferred_regs = 0;
        if (arg_ct->ct & TCG_CT_IALIAS) {
            o_preferred_regs = op->output_pref[arg_ct->alias_index];
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != op->args[arg_ct->alias_index]) {
                    goto allocate_in_reg;
                }
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }

                /* check if the current register has already been allocated
                   for another input aliased to an output */
                if (ts->val_type == TEMP_VAL_REG) {
                    int k2, i2;
                    reg = ts->reg;
                    for (k2 = 0 ; k2 < k ; k2++) {
                        i2 = def->sorted_args[nb_oargs + k2];
                        if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
                            reg == new_args[i2]) {
                            goto allocate_in_reg;
                        }
                    }
                }
                i_preferred_regs = o_preferred_regs;
            }
        }

        temp_load(s, ts, arg_ct->u.regs, i_allocated_regs, i_preferred_regs);
        reg = ts->reg;

        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      i_allocated_regs, 0);
            reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
                                o_preferred_regs, ts->indirect_base);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for(k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified.  */
            tcg_debug_assert(!ts->fixed_reg);

            if ((arg_ct->ct & TCG_CT_ALIAS)
                && !const_args[arg_ct->alias_index]) {
                reg = new_args[arg_ct->alias_index];
            } else if (arg_ct->ct & TCG_CT_NEWREG) {
                reg = tcg_reg_alloc(s, arg_ct->u.regs,
                                    i_allocated_regs | o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            } else {
                reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            /*
             * Temp value is modified, so the value kept in memory is
             * potentially not the same.
             */
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified.  */
        tcg_debug_assert(!ts->fixed_reg);

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif
static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
    flags = op->args[nb_oargs + nb_iargs + 1];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = op->args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs, 0);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    allocated_regs = s->reserved_regs;
    for (i = 0; i < nb_regs; i++) {
        arg = op->args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            reg = tcg_target_call_iarg_regs[i];

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_reg_free(s, reg, allocated_regs);
                    if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                        /*
                         * Cross register class move not supported.  Sync the
                         * temp back to its slot and load from there.
                         */
                        temp_sync(s, ts, allocated_regs, 0, 0);
                        tcg_out_ld(s, ts->type, reg,
                                   ts->mem_base->reg, ts->mem_offset);
                    }
                }
            } else {
                TCGRegSet arg_set = 0;

                tcg_reg_free(s, reg, allocated_regs);
                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs, 0);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for(i = 0; i < nb_oargs; i++) {
        arg = op->args[i];
        ts = arg_temp(arg);

        /* ENV should not be modified.  */
        tcg_debug_assert(!ts->fixed_reg);

        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);
        if (ts->val_type == TEMP_VAL_REG) {
            s->reg_to_temp[ts->reg] = NULL;
        }
        ts->val_type = TEMP_VAL_REG;
        ts->reg = reg;
        ts->mem_coherent = 0;
        s->reg_to_temp[reg] = ts;
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
3884 /* avoid copy/paste errors */
3885 #define PROF_ADD(to, from, field) \
3887 (to)->field += atomic_read(&((from)->field)); \
3890 #define PROF_MAX(to, from, field) \
3892 typeof((from)->field) val__ = atomic_read(&((from)->field)); \
3893 if (val__ > (to)->field) { \
3894 (to)->field = val__; \
3898 /* Pass in a zero'ed @prof */
3900 void tcg_profile_snapshot(TCGProfile
*prof
, bool counters
, bool table
)
3902 unsigned int n_ctxs
= atomic_read(&n_tcg_ctxs
);
3905 for (i
= 0; i
< n_ctxs
; i
++) {
3906 TCGContext
*s
= atomic_read(&tcg_ctxs
[i
]);
3907 const TCGProfile
*orig
= &s
->prof
;
3910 PROF_ADD(prof
, orig
, cpu_exec_time
);
3911 PROF_ADD(prof
, orig
, tb_count1
);
3912 PROF_ADD(prof
, orig
, tb_count
);
3913 PROF_ADD(prof
, orig
, op_count
);
3914 PROF_MAX(prof
, orig
, op_count_max
);
3915 PROF_ADD(prof
, orig
, temp_count
);
3916 PROF_MAX(prof
, orig
, temp_count_max
);
3917 PROF_ADD(prof
, orig
, del_op_count
);
3918 PROF_ADD(prof
, orig
, code_in_len
);
3919 PROF_ADD(prof
, orig
, code_out_len
);
3920 PROF_ADD(prof
, orig
, search_out_len
);
3921 PROF_ADD(prof
, orig
, interm_time
);
3922 PROF_ADD(prof
, orig
, code_time
);
3923 PROF_ADD(prof
, orig
, la_time
);
3924 PROF_ADD(prof
, orig
, opt_time
);
3925 PROF_ADD(prof
, orig
, restore_count
);
3926 PROF_ADD(prof
, orig
, restore_time
);
3931 for (i
= 0; i
< NB_OPS
; i
++) {
3932 PROF_ADD(prof
, orig
, table_op_count
[i
]);
3941 static void tcg_profile_snapshot_counters(TCGProfile
*prof
)
3943 tcg_profile_snapshot(prof
, true, false);
3946 static void tcg_profile_snapshot_table(TCGProfile
*prof
)
3948 tcg_profile_snapshot(prof
, false, true);
void tcg_dump_op_count(void)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name,
                    prof.table_op_count[i]);
    }
}

int64_t tcg_cpu_exec_time(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    int64_t ret = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        const TCGProfile *prof = &s->prof;

        ret += atomic_read(&prof->cpu_exec_time);
    }
    return ret;
}
#else
void tcg_dump_op_count(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}

int64_t tcg_cpu_exec_time(void)
{
    error_report("%s: TCG profiler not compiled", __func__);
    exit(EXIT_FAILURE);
}
#endif
int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &s->prof;
#endif
    int i, num_insns;
    TCGOp *op;

#ifdef CONFIG_PROFILER
    {
        int n = 0;

        QTAILQ_FOREACH(op, &s->ops, link) {
            n++;
        }
        atomic_set(&prof->op_count, prof->op_count + n);
        if (n > prof->op_count_max) {
            atomic_set(&prof->op_count_max, n);
        }

        n = s->nb_temps;
        atomic_set(&prof->temp_count, prof->temp_count + n);
        if (n > prof->temp_count_max) {
            atomic_set(&prof->temp_count_max, n);
        }
    }
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP:\n");
        tcg_dump_ops(s, false);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted.  */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && l->refs) {
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
                error = true;
            }
        }
        assert(!error);
    }
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    atomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    reachable_code_pass(s);
    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(tb->pc))) {
            qemu_log_lock();
            qemu_log("OP before indirect lowering:\n");
            tcg_dump_ops(s, false);
            qemu_log("\n");
            qemu_log_unlock();
        }
#endif
        /* Replace indirect temps with direct temps.  */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness.  */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s, true);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = tb->tc.ptr;
    s->code_ptr = tb->tc.ptr;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
        case INDEX_op_dupi_vec:
            tcg_reg_alloc_movi(s, op);
            break;
        case INDEX_op_dup_vec:
            tcg_reg_alloc_dup(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset.  */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off.  */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

    /* flush instruction cache */
    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

    return tcg_current_code_size(s);
}
#ifdef CONFIG_PROFILER
void tcg_dump_info(void)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    qemu_printf("JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    qemu_printf("translated TBs      %" PRId64 " (aborted=%" PRId64
                " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    qemu_printf("avg ops/TB          %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    qemu_printf("deleted ops/TB      %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    qemu_printf("avg temps/TB        %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    qemu_printf("avg host code/TB    %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    qemu_printf("avg search data/TB  %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    qemu_printf("cycles/op           %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    qemu_printf("cycles/in byte      %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    qemu_printf("cycles/out byte     %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    qemu_printf("cycles/search byte  %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    qemu_printf("  gen_interm time   %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    qemu_printf("  gen_code time     %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    qemu_printf("optim./code time    %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    qemu_printf("liveness/code time  %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    qemu_printf("cpu_restore count   %" PRId64 "\n",
                s->restore_count);
    qemu_printf("  avg cycles        %0.1f\n",
                s->restore_count
                ? (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}
#endif
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */

typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface.  */

static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
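/*
 * Once registered, the generated code becomes visible to an attached
 * debugger through GDB's JIT interface; a session might look roughly
 * like the following (sketch only, output abbreviated and approximate):
 *
 *     (gdb) info function code_gen_buffer
 *     Non-debugging symbols:
 *     0x00007f...  code_gen_buffer
 *
 * courtesy of the in-memory ELF image constructed below.
 */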
static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities.  */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite.  */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier.  */

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif