/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"
/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS
#include "cpu.h"

#include "exec/cpu-common.h"
#include "exec/exec-all.h"

#include "tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "sysemu/sysemu.h"

/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
#if TCG_TARGET_MAYBE_vec
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                           unsigned vece, const TCGArg *args,
                           const int *const_args);
#else
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                                  unsigned vece, const TCGArg *args,
                                  const int *const_args)
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static bool tcg_out_ldst_finalize(TCGContext *s);
#endif

#define TCG_HIGHWATER 1024

static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;
/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;
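
/*
 * For illustration (hypothetical numbers): with a 64 MB code_gen_buffer,
 * 4 KB pages and n = 32, each region spans stride = 2 MB, of which
 * size = 2 MB - 4 KB is usable code space and the final page is a guard
 * page; start_aligned + i * stride yields region i's base.
 */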
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
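
/*
 * In the helpers above the if/else resolves at compile time: e.g. on a host
 * whose tcg-target.h sets TCG_TARGET_INSN_UNIT_SIZE == 4 (a sketch; the
 * actual value is per backend), tcg_out32() stores one unit directly while
 * tcg_out64() takes the memcpy path and advances code_ptr by 8 / 4 = 2 units.
 */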
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place.  The caller may not have
           written the initial value.  */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}
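
/*
 * In other words: a reference to a label that has already been emitted is
 * patched on the spot, while a forward reference is queued on the label
 * and resolved by tcg_out_label() below once the label's address is known.
 */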
static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    *l = (TCGLabel){
        .id = s->nb_labels++
    };

    return l;
}

#include "tcg-target.inc.c"
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        bool err = tcg_region_initial_alloc__locked(s);

        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);
}
#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
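
/*
 * Worked example (hypothetical numbers): with max_cpus = 8 and a 256 MB
 * code_gen_buffer, the first iteration above (i = 8) gives
 * 256 MB / (8 * 8) = 4 MB >= 2 MB, so we end up with 64 regions.
 */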
/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = atomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < max_cpus);
    atomic_set(&tcg_ctxs[n], s);

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */
/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
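
/*
 * E.g. (hypothetical numbers): 32 regions with a 2 MB stride, a 4 KB guard
 * page and TCG_HIGHWATER = 1024 give
 * capacity = 64 MB - 32 * (4 KB + 1 KB) = 64 MB - 160 KB.
 */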
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
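
/*
 * Note: tcg_malloc_internal() is the slow path; the common case is the
 * inline tcg_malloc() in tcg.h, which just bumps s->pool_cur and only
 * falls back here when the current chunk is exhausted.
 */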
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;
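
/*
 * sizemask encoding, as consumed by tcg_gen_callN() below: bit 0 is set if
 * the return value is 64 bits; for argument i, bit (i+1)*2 is set if the
 * argument is 64 bits and bit (i+1)*2 + 1 if it is signed. For example, a
 * helper returning i64 and taking (u32, u64) arguments has sizemask 0x11.
 */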
#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
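
/*
 * For illustration: with a 64-byte icache line, 'tb' starts on a line
 * boundary and 'next' (where translated code will be emitted) starts on
 * the first line boundary past the TB struct, so the TB header and its
 * code never share a cache line.
 */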
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;
    s->code_gen_prologue = buf0;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        bool ok = tcg_out_pool_finalize(s);
        tcg_debug_assert(ok);
    }
#endif

    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}

void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
}
static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
static TCGTemp *tcg_temp_new_internal(TCGType type, int temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}

TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
    return temp_tcgv_i32(t);
}

TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
    return temp_tcgv_i64(t);
}

TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}
static void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}

void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(tcgv_i32_temp(arg));
}

void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(tcgv_i64_temp(arg));
}

void tcg_temp_free_vec(TCGv_vec arg)
{
    tcg_temp_free_internal(tcgv_vec_temp(arg));
}

TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupi_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;
    TCGTemp *ts;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
    }
    for (n = s->nb_temps; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}

static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    if (ts->temp_global) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}

/* Find helper name.  */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (helper_table) {
        TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
void tcg_dump_ops(TCGContext *s)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            col += qemu_log("\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }
        if (op->life) {
            unsigned life = op->life;

            for (; col < 48; ++col) {
                putc(' ', qemu_logfile);
            }

            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }
        qemu_log("\n");
    }
}
/* we give more priority to constraints with less registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;

    int i, n;
    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG))
            return 0;
        n = 0;
        for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i))
                n++;
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}
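
/*
 * E.g. with 16 target registers, a constraint allowing any register gets
 * priority 16 - 16 + 1 = 1, while a single-register constraint (or an
 * alias, which counts as one register) gets 16; the most constrained
 * arguments are thus considered first by sort_constraints() below.
 */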
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for(i = 0; i < n; i++)
        def->sorted_args[start + i] = start + i;
    if (n <= 1)
        return;
    for(i = 0; i < n - 1; i++) {
        for(j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                    break;
                }
            }
        }
        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just an heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
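
/*
 * A sketch of the constraint syntax parsed above (the actual strings are
 * defined per backend in tcg-target.inc.c): an entry like
 * { INDEX_op_add_i32, { "r", "r", "ri" } } would mean "output in a
 * register, first input in a register, second input in a register or an
 * immediate". A digit such as "0" aliases an input to that output, '&'
 * requests a new (non-overlapping) output register, and 'i' accepts any
 * constant; remaining letters are backend-specific.
 */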
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);

#ifdef CONFIG_PROFILER
    atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}

static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op;

    if (likely(QTAILQ_EMPTY(&s->free_ops))) {
        op = tcg_malloc(sizeof(TCGOp));
    } else {
        op = QTAILQ_FIRST(&s->free_ops);
        QTAILQ_REMOVE(&s->free_ops, op, link);
    }
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;

    return op;
}

TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, int nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, int nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
#define TS_DEAD  1
#define TS_MEM   2

#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
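
/*
 * Temp state bits for the liveness pass: TS_DEAD means the value is not
 * needed by any later op; TS_MEM means the canonical copy must (also)
 * reside in memory. E.g. at a function end, globals are TS_DEAD | TS_MEM:
 * dead in registers but required to be synced back to their memory slot.
 */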
/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void tcg_la_func_end(TCGContext *s)
{
    int ng = s->nb_globals;
    int nt = s->nb_temps;
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
    }
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void tcg_la_bb_end(TCGContext *s)
{
    int ng = s->nb_globals;
    int nt = s->nb_temps;
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = (s->temps[i].temp_local
                             ? TS_DEAD | TS_MEM
                             : TS_DEAD);
    }
}
2072 /* Liveness analysis: update the opc_arg_life array to tell whether a
2073 given input argument is dead. Instructions updating dead
2074 temporaries are removed. */
2075 static void liveness_pass_1(TCGContext *s)
2077 int nb_globals = s->nb_globals;
2078 TCGOp *op, *op_prev;
2080 tcg_la_func_end(s);
2082 QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, TCGOpHead, link, op_prev) {
2083 int i, nb_iargs, nb_oargs;
2084 TCGOpcode opc_new, opc_new2;
2085 bool have_opc_new2;
2086 TCGLifeData arg_life = 0;
2087 TCGTemp *arg_ts;
2088 TCGOpcode opc = op->opc;
2089 const TCGOpDef *def = &tcg_op_defs[opc];
2091 switch (opc) {
2092 case INDEX_op_call:
2094 int call_flags;
2096 nb_oargs = TCGOP_CALLO(op);
2097 nb_iargs = TCGOP_CALLI(op);
2098 call_flags = op->args[nb_oargs + nb_iargs + 1];
2100 /* pure functions can be removed if their result is unused */
2101 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
2102 for (i = 0; i < nb_oargs; i++) {
2103 arg_ts = arg_temp(op->args[i]);
2104 if (arg_ts->state != TS_DEAD) {
2105 goto do_not_remove_call;
2108 goto do_remove;
2109 } else {
2110 do_not_remove_call:
2112 /* output args are dead */
2113 for (i = 0; i < nb_oargs; i++) {
2114 arg_ts = arg_temp(op->args[i]);
2115 if (arg_ts->state & TS_DEAD) {
2116 arg_life |= DEAD_ARG << i;
2118 if (arg_ts->state & TS_MEM) {
2119 arg_life |= SYNC_ARG << i;
2121 arg_ts->state = TS_DEAD;
2124 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
2125 TCG_CALL_NO_READ_GLOBALS))) {
2126 /* globals should go back to memory */
2127 for (i = 0; i < nb_globals; i++) {
2128 s->temps[i].state = TS_DEAD | TS_MEM;
2130 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
2131 /* globals should be synced to memory */
2132 for (i = 0; i < nb_globals; i++) {
2133 s->temps[i].state |= TS_MEM;
2137 /* record arguments that die in this helper */
2138 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2139 arg_ts = arg_temp(op->args[i]);
2140 if (arg_ts && arg_ts->state & TS_DEAD) {
2141 arg_life |= DEAD_ARG << i;
2144 /* input arguments are live for preceding opcodes */
2145 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2146 arg_ts = arg_temp(op->args[i]);
2147 if (arg_ts) {
2148 arg_ts->state &= ~TS_DEAD;
2153 break;
2154 case INDEX_op_insn_start:
2155 break;
2156 case INDEX_op_discard:
2157 /* mark the temporary as dead */
2158 arg_temp(op->args[0])->state = TS_DEAD;
2159 break;
2161 case INDEX_op_add2_i32:
2162 opc_new = INDEX_op_add_i32;
2163 goto do_addsub2;
2164 case INDEX_op_sub2_i32:
2165 opc_new = INDEX_op_sub_i32;
2166 goto do_addsub2;
2167 case INDEX_op_add2_i64:
2168 opc_new = INDEX_op_add_i64;
2169 goto do_addsub2;
2170 case INDEX_op_sub2_i64:
2171 opc_new = INDEX_op_sub_i64;
2172 do_addsub2:
2173 nb_iargs = 4;
2174 nb_oargs = 2;
2175 /* Test if the high part of the operation is dead, but not
2176 the low part. The result can be optimized to a simple
2177 add or sub. This happens often for x86_64 guests when the
2178 CPU mode is set to 32 bit. */
2179 if (arg_temp(op->args[1])->state == TS_DEAD) {
2180 if (arg_temp(op->args[0])->state == TS_DEAD) {
2181 goto do_remove;
2183 /* Replace the opcode and adjust the args in place,
2184 leaving 3 unused args at the end. */
2185 op->opc = opc = opc_new;
2186 op->args[1] = op->args[2];
2187 op->args[2] = op->args[4];
2188 /* Fall through and mark the single-word operation live. */
2189 nb_iargs = 2;
2190 nb_oargs = 1;
2192 goto do_not_remove;
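/* E.g. "add2_i32 lo, hi, al, ah, bl, bh" with only HI dead is rewritten
   above to "add_i32 lo, al, bl", leaving args[3..5] unused. */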
2194 case INDEX_op_mulu2_i32:
2195 opc_new = INDEX_op_mul_i32;
2196 opc_new2 = INDEX_op_muluh_i32;
2197 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
2198 goto do_mul2;
2199 case INDEX_op_muls2_i32:
2200 opc_new = INDEX_op_mul_i32;
2201 opc_new2 = INDEX_op_mulsh_i32;
2202 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
2203 goto do_mul2;
2204 case INDEX_op_mulu2_i64:
2205 opc_new = INDEX_op_mul_i64;
2206 opc_new2 = INDEX_op_muluh_i64;
2207 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
2208 goto do_mul2;
2209 case INDEX_op_muls2_i64:
2210 opc_new = INDEX_op_mul_i64;
2211 opc_new2 = INDEX_op_mulsh_i64;
2212 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
2213 goto do_mul2;
2214 do_mul2:
2215 nb_iargs = 2;
2216 nb_oargs = 2;
2217 if (arg_temp(op->args[1])->state == TS_DEAD) {
2218 if (arg_temp(op->args[0])->state == TS_DEAD) {
2219 /* Both parts of the operation are dead. */
2220 goto do_remove;
2222 /* The high part of the operation is dead; generate the low. */
2223 op->opc = opc = opc_new;
2224 op->args[1] = op->args[2];
2225 op->args[2] = op->args[3];
2226 } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
2227 /* The low part of the operation is dead; generate the high. */
2228 op->opc = opc = opc_new2;
2229 op->args[0] = op->args[1];
2230 op->args[1] = op->args[2];
2231 op->args[2] = op->args[3];
2232 } else {
2233 goto do_not_remove;
2235 /* Mark the single-word operation live. */
2236 nb_oargs = 1;
2237 goto do_not_remove;
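/* E.g. "mulu2_i32 lo, hi, a, b" becomes "mul_i32 lo, a, b" when HI is
   dead, or "muluh_i32 hi, a, b" when LO is dead and the host provides
   the high-part multiply. */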
2239 default:
2240 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
2241 nb_iargs = def->nb_iargs;
2242 nb_oargs = def->nb_oargs;
2244 /* Test if the operation can be removed because all
2245 its outputs are dead. We assume that nb_oargs == 0
2246 implies side effects */
2247 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
2248 for (i = 0; i < nb_oargs; i++) {
2249 if (arg_temp(op->args[i])->state != TS_DEAD) {
2250 goto do_not_remove;
2253 do_remove:
2254 tcg_op_remove(s, op);
2255 } else {
2256 do_not_remove:
2257 /* output args are dead */
2258 for (i = 0; i < nb_oargs; i++) {
2259 arg_ts = arg_temp(op->args[i]);
2260 if (arg_ts->state & TS_DEAD) {
2261 arg_life |= DEAD_ARG << i;
2263 if (arg_ts->state & TS_MEM) {
2264 arg_life |= SYNC_ARG << i;
2266 arg_ts->state = TS_DEAD;
2269 /* if end of basic block, update the liveness state */
2270 if (def->flags & TCG_OPF_BB_END) {
2271 tcg_la_bb_end(s);
2272 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2273 /* globals should be synced to memory */
2274 for (i = 0; i < nb_globals; i++) {
2275 s->temps[i].state |= TS_MEM;
2279 /* record arguments that die in this opcode */
2280 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2281 arg_ts = arg_temp(op->args[i]);
2282 if (arg_ts->state & TS_DEAD) {
2283 arg_life |= DEAD_ARG << i;
2286 /* input arguments are live for preceding opcodes */
2287 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2288 arg_temp(op->args[i])->state &= ~TS_DEAD;
2291 break;
2293 op->life = arg_life;
2297 /* Liveness analysis: Convert indirect regs to direct temporaries. */
2298 static bool liveness_pass_2(TCGContext *s)
2300 int nb_globals = s->nb_globals;
2301 int nb_temps, i;
2302 bool changes = false;
2303 TCGOp *op, *op_next;
2305 /* Create a temporary for each indirect global. */
2306 for (i = 0; i < nb_globals; ++i) {
2307 TCGTemp *its = &s->temps[i];
2308 if (its->indirect_reg) {
2309 TCGTemp *dts = tcg_temp_alloc(s);
2310 dts->type = its->type;
2311 dts->base_type = its->base_type;
2312 its->state_ptr = dts;
2313 } else {
2314 its->state_ptr = NULL;
2316 /* All globals begin dead. */
2317 its->state = TS_DEAD;
2319 for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
2320 TCGTemp *its = &s->temps[i];
2321 its->state_ptr = NULL;
2322 its->state = TS_DEAD;
2325 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2326 TCGOpcode opc = op->opc;
2327 const TCGOpDef *def = &tcg_op_defs[opc];
2328 TCGLifeData arg_life = op->life;
2329 int nb_iargs, nb_oargs, call_flags;
2330 TCGTemp *arg_ts, *dir_ts;
2332 if (opc == INDEX_op_call) {
2333 nb_oargs = TCGOP_CALLO(op);
2334 nb_iargs = TCGOP_CALLI(op);
2335 call_flags = op->args[nb_oargs + nb_iargs + 1];
2336 } else {
2337 nb_iargs = def->nb_iargs;
2338 nb_oargs = def->nb_oargs;
2340 /* Set call_flags as an equivalent call would require. */
2341 if (def->flags & TCG_OPF_BB_END) {
2342 /* Like writing globals: save_globals */
2343 call_flags = 0;
2344 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2345 /* Like reading globals: sync_globals */
2346 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
2347 } else {
2348 /* No effect on globals. */
2349 call_flags = (TCG_CALL_NO_READ_GLOBALS |
2350 TCG_CALL_NO_WRITE_GLOBALS);
2354 /* Make sure that input arguments are available. */
2355 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2356 arg_ts = arg_temp(op->args[i]);
2357 if (arg_ts) {
2358 dir_ts = arg_ts->state_ptr;
2359 if (dir_ts && arg_ts->state == TS_DEAD) {
2360 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
2361 ? INDEX_op_ld_i32
2362 : INDEX_op_ld_i64);
2363 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
2365 lop->args[0] = temp_arg(dir_ts);
2366 lop->args[1] = temp_arg(arg_ts->mem_base);
2367 lop->args[2] = arg_ts->mem_offset;
2369 /* Loaded, but synced with memory. */
2370 arg_ts->state = TS_MEM;
2375 /* Perform input replacement, and mark inputs that became dead.
2376 No action is required except keeping temp_state up to date
2377 so that we reload when needed. */
2378 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2379 arg_ts = arg_temp(op->args[i]);
2380 if (arg_ts) {
2381 dir_ts = arg_ts->state_ptr;
2382 if (dir_ts) {
2383 op->args[i] = temp_arg(dir_ts);
2384 changes = true;
2385 if (IS_DEAD_ARG(i)) {
2386 arg_ts->state = TS_DEAD;
2392 /* Liveness analysis should ensure that the following are
2393 all correct, for call sites and basic block end points. */
2394 if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
2395 /* Nothing to do */
2396 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
2397 for (i = 0; i < nb_globals; ++i) {
2398 /* Liveness should see that globals are synced back,
2399 that is, either TS_DEAD or TS_MEM. */
2400 arg_ts = &s->temps[i];
2401 tcg_debug_assert(arg_ts->state_ptr == 0
2402 || arg_ts->state != 0);
2404 } else {
2405 for (i = 0; i < nb_globals; ++i) {
2406 /* Liveness should see that globals are saved back,
2407 that is, TS_DEAD, waiting to be reloaded. */
2408 arg_ts = &s->temps[i];
2409 tcg_debug_assert(arg_ts->state_ptr == 0
2410 || arg_ts->state == TS_DEAD);
2414 /* Outputs become available. */
2415 for (i = 0; i < nb_oargs; i++) {
2416 arg_ts = arg_temp(op->args[i]);
2417 dir_ts = arg_ts->state_ptr;
2418 if (!dir_ts) {
2419 continue;
2421 op->args[i] = temp_arg(dir_ts);
2422 changes = true;
2424 /* The output is now live and modified. */
2425 arg_ts->state = 0;
2427 /* Sync outputs upon their last write. */
2428 if (NEED_SYNC_ARG(i)) {
2429 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
2430 ? INDEX_op_st_i32
2431 : INDEX_op_st_i64);
2432 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
2434 sop->args[0] = temp_arg(dir_ts);
2435 sop->args[1] = temp_arg(arg_ts->mem_base);
2436 sop->args[2] = arg_ts->mem_offset;
2438 arg_ts->state = TS_MEM;
2440 /* Drop outputs that are dead. */
2441 if (IS_DEAD_ARG(i)) {
2442 arg_ts->state = TS_DEAD;
2447 return changes;
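/* Illustration (a sketch): for an indirect global G, kept at
   [mem_base + mem_offset], this pass rewrites
       add_i32 G, G, t0
   into
       ld_i32  dG, base, off       (inserted only if G was TS_DEAD)
       add_i32 dG, dG, t0
       st_i32  dG, base, off       (inserted only if NEED_SYNC_ARG(0))
   where dG is the direct temporary allocated for G above. */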
2450 #ifdef CONFIG_DEBUG_TCG
2451 static void dump_regs(TCGContext *s)
2453 TCGTemp *ts;
2454 int i;
2455 char buf[64];
2457 for (i = 0; i < s->nb_temps; i++) {
2458 ts = &s->temps[i];
2459 printf(" %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
2460 switch (ts->val_type) {
2461 case TEMP_VAL_REG:
2462 printf("%s", tcg_target_reg_names[ts->reg]);
2463 break;
2464 case TEMP_VAL_MEM:
2465 printf("%d(%s)", (int)ts->mem_offset,
2466 tcg_target_reg_names[ts->mem_base->reg]);
2467 break;
2468 case TEMP_VAL_CONST:
2469 printf("$0x%" TCG_PRIlx, ts->val);
2470 break;
2471 case TEMP_VAL_DEAD:
2472 printf("D");
2473 break;
2474 default:
2475 printf("???");
2476 break;
2478 printf("\n");
2481 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2482 if (s->reg_to_temp[i] != NULL) {
2483 printf("%s: %s\n",
2484 tcg_target_reg_names[i],
2485 tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
2490 static void check_regs(TCGContext *s)
2492 int reg;
2493 int k;
2494 TCGTemp *ts;
2495 char buf[64];
2497 for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2498 ts = s->reg_to_temp[reg];
2499 if (ts != NULL) {
2500 if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
2501 printf("Inconsistency for register %s:\n",
2502 tcg_target_reg_names[reg]);
2503 goto fail;
2507 for (k = 0; k < s->nb_temps; k++) {
2508 ts = &s->temps[k];
2509 if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
2510 && s->reg_to_temp[ts->reg] != ts) {
2511 printf("Inconsistency for temp %s:\n",
2512 tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
2513 fail:
2514 printf("reg state:\n");
2515 dump_regs(s);
2516 tcg_abort();
2520 #endif
2522 static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
2524 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
2525 /* The Sparc64 stack is accessed with an offset of 2047 */
2526 s->current_frame_offset = (s->current_frame_offset +
2527 (tcg_target_long)sizeof(tcg_target_long) - 1) &
2528 ~(sizeof(tcg_target_long) - 1);
2529 #endif
2530 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
2531 s->frame_end) {
2532 tcg_abort();
2534 ts->mem_offset = s->current_frame_offset;
2535 ts->mem_base = s->frame_temp;
2536 ts->mem_allocated = 1;
2537 s->current_frame_offset += sizeof(tcg_target_long);
2540 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
2542 /* Mark a temporary as free or dead. If 'free_or_dead' is negative,
2543 mark it free; otherwise mark it dead. */
2544 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
2546 if (ts->fixed_reg) {
2547 return;
2549 if (ts->val_type == TEMP_VAL_REG) {
2550 s->reg_to_temp[ts->reg] = NULL;
2552 ts->val_type = (free_or_dead < 0
2553 || ts->temp_local
2554 || ts->temp_global
2555 ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
2558 /* Mark a temporary as dead. */
2559 static inline void temp_dead(TCGContext *s, TCGTemp *ts)
2561 temp_free_or_dead(s, ts, 1);
2564 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
2565 register needs to be allocated to store a constant. If 'free_or_dead'
2566 is non-zero, subsequently release the temporary; if it is positive, the
2567 temp is dead; if it is negative, the temp is free. */
2568 static void temp_sync(TCGContext *s, TCGTemp *ts,
2569 TCGRegSet allocated_regs, int free_or_dead)
2571 if (ts->fixed_reg) {
2572 return;
2574 if (!ts->mem_coherent) {
2575 if (!ts->mem_allocated) {
2576 temp_allocate_frame(s, ts);
2578 switch (ts->val_type) {
2579 case TEMP_VAL_CONST:
2580 /* If we're going to free the temp immediately, then we won't
2581 require it later in a register, so attempt to store the
2582 constant to memory directly. */
2583 if (free_or_dead
2584 && tcg_out_sti(s, ts->type, ts->val,
2585 ts->mem_base->reg, ts->mem_offset)) {
2586 break;
2588 temp_load(s, ts, tcg_target_available_regs[ts->type],
2589 allocated_regs);
2590 /* fallthrough */
2592 case TEMP_VAL_REG:
2593 tcg_out_st(s, ts->type, ts->reg,
2594 ts->mem_base->reg, ts->mem_offset);
2595 break;
2597 case TEMP_VAL_MEM:
2598 break;
2600 case TEMP_VAL_DEAD:
2601 default:
2602 tcg_abort();
2604 ts->mem_coherent = 1;
2606 if (free_or_dead) {
2607 temp_free_or_dead(s, ts, free_or_dead);
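/* Examples: temp_sync(s, ts, regs, 0) only stores ts back to memory if
   it is not coherent; passing 1 additionally marks ts dead afterwards;
   passing -1 marks it free (TEMP_VAL_MEM). */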
2611 /* free register 'reg' by spilling the corresponding temporary if necessary */
2612 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
2614 TCGTemp *ts = s->reg_to_temp[reg];
2615 if (ts != NULL) {
2616 temp_sync(s, ts, allocated_regs, -1);
2620 /* Allocate a register from desired_regs & ~allocated_regs */
2621 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
2622 TCGRegSet allocated_regs, bool rev)
2624 int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
2625 const int *order;
2626 TCGReg reg;
2627 TCGRegSet reg_ct;
2629 reg_ct = desired_regs & ~allocated_regs;
2630 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
2632 /* first try free registers */
2633 for (i = 0; i < n; i++) {
2634 reg = order[i];
2635 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
2636 return reg;
2639 /* XXX: do better spill choice */
2640 for (i = 0; i < n; i++) {
2641 reg = order[i];
2642 if (tcg_regset_test_reg(reg_ct, reg)) {
2643 tcg_reg_free(s, reg, allocated_regs);
2644 return reg;
2648 tcg_abort();
2651 /* Make sure the temporary is in a register. If needed, allocate the register
2652 from DESIRED while avoiding ALLOCATED. */
2653 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
2654 TCGRegSet allocated_regs)
2656 TCGReg reg;
2658 switch (ts->val_type) {
2659 case TEMP_VAL_REG:
2660 return;
2661 case TEMP_VAL_CONST:
2662 reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
2663 tcg_out_movi(s, ts->type, reg, ts->val);
2664 ts->mem_coherent = 0;
2665 break;
2666 case TEMP_VAL_MEM:
2667 reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
2668 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
2669 ts->mem_coherent = 1;
2670 break;
2671 case TEMP_VAL_DEAD:
2672 default:
2673 tcg_abort();
2675 ts->reg = reg;
2676 ts->val_type = TEMP_VAL_REG;
2677 s->reg_to_temp[reg] = ts;
2680 /* Save a temporary to memory. 'allocated_regs' is used in case a
2681 temporary register needs to be allocated to store a constant. */
2682 static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
2684 /* The liveness analysis already ensures that globals are back
2685 in memory. Keep a tcg_debug_assert for safety. */
2686 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
2689 /* save globals to their canonical location and assume they can be
2690 modified by the following code. 'allocated_regs' is used in case a
2691 temporary register needs to be allocated to store a constant. */
2692 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
2694 int i, n;
2696 for (i = 0, n = s->nb_globals; i < n; i++) {
2697 temp_save(s, &s->temps[i], allocated_regs);
2701 /* sync globals to their canonical location and assume they can be
2702 read by the following code. 'allocated_regs' is used in case a
2703 temporary register needs to be allocated to store a constant. */
2704 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
2706 int i, n;
2708 for (i = 0, n = s->nb_globals; i < n; i++) {
2709 TCGTemp *ts = &s->temps[i];
2710 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
2711 || ts->fixed_reg
2712 || ts->mem_coherent);
2716 /* at the end of a basic block, we assume all temporaries are dead and
2717 all globals are stored at their canonical location. */
2718 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
2720 int i;
2722 for (i = s->nb_globals; i < s->nb_temps; i++) {
2723 TCGTemp *ts = &s->temps[i];
2724 if (ts->temp_local) {
2725 temp_save(s, ts, allocated_regs);
2726 } else {
2727 /* The liveness analysis already ensures that temps are dead.
2728 Keep a tcg_debug_assert for safety. */
2729 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
2733 save_globals(s, allocated_regs);
2736 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
2737 tcg_target_ulong val, TCGLifeData arg_life)
2739 if (ots->fixed_reg) {
2740 /* For fixed registers, we do not do any constant propagation. */
2741 tcg_out_movi(s, ots->type, ots->reg, val);
2742 return;
2745 /* The movi is not explicitly generated here. */
2746 if (ots->val_type == TEMP_VAL_REG) {
2747 s->reg_to_temp[ots->reg] = NULL;
2749 ots->val_type = TEMP_VAL_CONST;
2750 ots->val = val;
2751 ots->mem_coherent = 0;
2752 if (NEED_SYNC_ARG(0)) {
2753 temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
2754 } else if (IS_DEAD_ARG(0)) {
2755 temp_dead(s, ots);
2759 static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
2761 TCGTemp *ots = arg_temp(op->args[0]);
2762 tcg_target_ulong val = op->args[1];
2764 tcg_reg_alloc_do_movi(s, ots, val, op->life);
2767 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
2769 const TCGLifeData arg_life = op->life;
2770 TCGRegSet allocated_regs;
2771 TCGTemp *ts, *ots;
2772 TCGType otype, itype;
2774 allocated_regs = s->reserved_regs;
2775 ots = arg_temp(op->args[0]);
2776 ts = arg_temp(op->args[1]);
2778 /* Note that otype != itype for no-op truncation. */
2779 otype = ots->type;
2780 itype = ts->type;
2782 if (ts->val_type == TEMP_VAL_CONST) {
2783 /* propagate constant or generate sti */
2784 tcg_target_ulong val = ts->val;
2785 if (IS_DEAD_ARG(1)) {
2786 temp_dead(s, ts);
2788 tcg_reg_alloc_do_movi(s, ots, val, arg_life);
2789 return;
2792 /* If the source value is in memory we're going to be forced
2793 to have it in a register in order to perform the copy. Copy
2794 the SOURCE value into its own register first, that way we
2795 don't have to reload SOURCE the next time it is used. */
2796 if (ts->val_type == TEMP_VAL_MEM) {
2797 temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
2800 tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
2801 if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
2802 /* mov to a non-saved dead register makes no sense (even with
2803 liveness analysis disabled). */
2804 tcg_debug_assert(NEED_SYNC_ARG(0));
2805 if (!ots->mem_allocated) {
2806 temp_allocate_frame(s, ots);
2808 tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
2809 if (IS_DEAD_ARG(1)) {
2810 temp_dead(s, ts);
2812 temp_dead(s, ots);
2813 } else {
2814 if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
2815 /* the mov can be suppressed */
2816 if (ots->val_type == TEMP_VAL_REG) {
2817 s->reg_to_temp[ots->reg] = NULL;
2819 ots->reg = ts->reg;
2820 temp_dead(s, ts);
2821 } else {
2822 if (ots->val_type != TEMP_VAL_REG) {
2823 /* When allocating a new register, make sure not to spill the
2824 input one. */
2825 tcg_regset_set_reg(allocated_regs, ts->reg);
2826 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
2827 allocated_regs, ots->indirect_base);
2829 tcg_out_mov(s, otype, ots->reg, ts->reg);
2831 ots->val_type = TEMP_VAL_REG;
2832 ots->mem_coherent = 0;
2833 s->reg_to_temp[ots->reg] = ots;
2834 if (NEED_SYNC_ARG(0)) {
2835 temp_sync(s, ots, allocated_regs, 0);
2840 static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
2842 const TCGLifeData arg_life = op->life;
2843 const TCGOpDef * const def = &tcg_op_defs[op->opc];
2844 TCGRegSet i_allocated_regs;
2845 TCGRegSet o_allocated_regs;
2846 int i, k, nb_iargs, nb_oargs;
2847 TCGReg reg;
2848 TCGArg arg;
2849 const TCGArgConstraint *arg_ct;
2850 TCGTemp *ts;
2851 TCGArg new_args[TCG_MAX_OP_ARGS];
2852 int const_args[TCG_MAX_OP_ARGS];
2854 nb_oargs = def->nb_oargs;
2855 nb_iargs = def->nb_iargs;
2857 /* copy constants */
2858 memcpy(new_args + nb_oargs + nb_iargs,
2859 op->args + nb_oargs + nb_iargs,
2860 sizeof(TCGArg) * def->nb_cargs);
2862 i_allocated_regs = s->reserved_regs;
2863 o_allocated_regs = s->reserved_regs;
2865 /* satisfy input constraints */
2866 for (k = 0; k < nb_iargs; k++) {
2867 i = def->sorted_args[nb_oargs + k];
2868 arg = op->args[i];
2869 arg_ct = &def->args_ct[i];
2870 ts = arg_temp(arg);
2872 if (ts->val_type == TEMP_VAL_CONST
2873 && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
2874 /* constant is OK for instruction */
2875 const_args[i] = 1;
2876 new_args[i] = ts->val;
2877 goto iarg_end;
2880 temp_load(s, ts, arg_ct->u.regs, i_allocated_regs);
2882 if (arg_ct->ct & TCG_CT_IALIAS) {
2883 if (ts->fixed_reg) {
2884 /* if fixed register, we must allocate a new register
2885 if the alias is not the same register */
2886 if (arg != op->args[arg_ct->alias_index])
2887 goto allocate_in_reg;
2888 } else {
2889 /* if the input is aliased to an output and if it is
2890 not dead after the instruction, we must allocate
2891 a new register and move it */
2892 if (!IS_DEAD_ARG(i)) {
2893 goto allocate_in_reg;
2895 /* check if the current register has already been allocated
2896 for another input aliased to an output */
2897 int k2, i2;
2898 for (k2 = 0 ; k2 < k ; k2++) {
2899 i2 = def->sorted_args[nb_oargs + k2];
2900 if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
2901 (new_args[i2] == ts->reg)) {
2902 goto allocate_in_reg;
2907 reg = ts->reg;
2908 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2909 /* nothing to do: the constraint is satisfied */
2910 } else {
2911 allocate_in_reg:
2912 /* allocate a new register matching the constraint
2913 and move the temporary register into it */
2914 reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
2915 ts->indirect_base);
2916 tcg_out_mov(s, ts->type, reg, ts->reg);
2918 new_args[i] = reg;
2919 const_args[i] = 0;
2920 tcg_regset_set_reg(i_allocated_regs, reg);
2921 iarg_end: ;
2924 /* mark dead temporaries and free the associated registers */
2925 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2926 if (IS_DEAD_ARG(i)) {
2927 temp_dead(s, arg_temp(op->args[i]));
2931 if (def->flags & TCG_OPF_BB_END) {
2932 tcg_reg_alloc_bb_end(s, i_allocated_regs);
2933 } else {
2934 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2935 /* XXX: permit generic clobber register list? */
2936 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2937 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
2938 tcg_reg_free(s, i, i_allocated_regs);
2942 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2943 /* sync globals if the op has side effects and might trigger
2944 an exception. */
2945 sync_globals(s, i_allocated_regs);
2948 /* satisfy the output constraints */
2949 for (k = 0; k < nb_oargs; k++) {
2950 i = def->sorted_args[k];
2951 arg = op->args[i];
2952 arg_ct = &def->args_ct[i];
2953 ts = arg_temp(arg);
2954 if ((arg_ct->ct & TCG_CT_ALIAS)
2955 && !const_args[arg_ct->alias_index]) {
2956 reg = new_args[arg_ct->alias_index];
2957 } else if (arg_ct->ct & TCG_CT_NEWREG) {
2958 reg = tcg_reg_alloc(s, arg_ct->u.regs,
2959 i_allocated_regs | o_allocated_regs,
2960 ts->indirect_base);
2961 } else {
2962 /* if fixed register, we try to use it */
2963 reg = ts->reg;
2964 if (ts->fixed_reg &&
2965 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2966 goto oarg_end;
2968 reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
2969 ts->indirect_base);
2971 tcg_regset_set_reg(o_allocated_regs, reg);
2972 /* if a fixed register is used, then a move will be done afterwards */
2973 if (!ts->fixed_reg) {
2974 if (ts->val_type == TEMP_VAL_REG) {
2975 s->reg_to_temp[ts->reg] = NULL;
2977 ts->val_type = TEMP_VAL_REG;
2978 ts->reg = reg;
2979 /* temp value is modified, so the value kept in memory is
2980 potentially not the same */
2981 ts->mem_coherent = 0;
2982 s->reg_to_temp[reg] = ts;
2984 oarg_end:
2985 new_args[i] = reg;
2989 /* emit instruction */
2990 if (def->flags & TCG_OPF_VECTOR) {
2991 tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
2992 new_args, const_args);
2993 } else {
2994 tcg_out_op(s, op->opc, new_args, const_args);
2997 /* move the outputs into the correct register if needed */
2998 for (i = 0; i < nb_oargs; i++) {
2999 ts = arg_temp(op->args[i]);
3000 reg = new_args[i];
3001 if (ts->fixed_reg && ts->reg != reg) {
3002 tcg_out_mov(s, ts->type, ts->reg, reg);
3004 if (NEED_SYNC_ARG(i)) {
3005 temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i));
3006 } else if (IS_DEAD_ARG(i)) {
3007 temp_dead(s, ts);
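/* Worked example (hypothetical two-address backend): given
       { INDEX_op_add_i32, { "r", "0", "ri" } }
   the input pass above loads arg 1 into a register, copying it first
   if it remains live (TCG_CT_IALIAS handling); the output pass reuses
   that register for arg 0 via TCG_CT_ALIAS; tcg_out_op() then emits a
   single two-operand add. */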
3012 #ifdef TCG_TARGET_STACK_GROWSUP
3013 #define STACK_DIR(x) (-(x))
3014 #else
3015 #define STACK_DIR(x) (x)
3016 #endif
3018 static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
3020 const int nb_oargs = TCGOP_CALLO(op);
3021 const int nb_iargs = TCGOP_CALLI(op);
3022 const TCGLifeData arg_life = op->life;
3023 int flags, nb_regs, i;
3024 TCGReg reg;
3025 TCGArg arg;
3026 TCGTemp *ts;
3027 intptr_t stack_offset;
3028 size_t call_stack_size;
3029 tcg_insn_unit *func_addr;
3030 int allocate_args;
3031 TCGRegSet allocated_regs;
3033 func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
3034 flags = op->args[nb_oargs + nb_iargs + 1];
3036 nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
3037 if (nb_regs > nb_iargs) {
3038 nb_regs = nb_iargs;
3041 /* assign stack slots first */
3042 call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
3043 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
3044 ~(TCG_TARGET_STACK_ALIGN - 1);
3045 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
3046 if (allocate_args) {
3047 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
3048 preallocate call stack */
3049 tcg_abort();
3052 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
3053 for (i = nb_regs; i < nb_iargs; i++) {
3054 arg = op->args[nb_oargs + i];
3055 #ifdef TCG_TARGET_STACK_GROWSUP
3056 stack_offset -= sizeof(tcg_target_long);
3057 #endif
3058 if (arg != TCG_CALL_DUMMY_ARG) {
3059 ts = arg_temp(arg);
3060 temp_load(s, ts, tcg_target_available_regs[ts->type],
3061 s->reserved_regs);
3062 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
3064 #ifndef TCG_TARGET_STACK_GROWSUP
3065 stack_offset += sizeof(tcg_target_long);
3066 #endif
3069 /* assign input registers */
3070 allocated_regs = s->reserved_regs;
3071 for (i = 0; i < nb_regs; i++) {
3072 arg = op->args[nb_oargs + i];
3073 if (arg != TCG_CALL_DUMMY_ARG) {
3074 ts = arg_temp(arg);
3075 reg = tcg_target_call_iarg_regs[i];
3076 tcg_reg_free(s, reg, allocated_regs);
3078 if (ts->val_type == TEMP_VAL_REG) {
3079 if (ts->reg != reg) {
3080 tcg_out_mov(s, ts->type, reg, ts->reg);
3082 } else {
3083 TCGRegSet arg_set = 0;
3085 tcg_regset_set_reg(arg_set, reg);
3086 temp_load(s, ts, arg_set, allocated_regs);
3089 tcg_regset_set_reg(allocated_regs, reg);
3093 /* mark dead temporaries and free the associated registers */
3094 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3095 if (IS_DEAD_ARG(i)) {
3096 temp_dead(s, arg_temp(op->args[i]));
3100 /* clobber call registers */
3101 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
3102 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
3103 tcg_reg_free(s, i, allocated_regs);
3107 /* Save globals if they might be written by the helper, sync them if
3108 they might be read. */
3109 if (flags & TCG_CALL_NO_READ_GLOBALS) {
3110 /* Nothing to do */
3111 } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
3112 sync_globals(s, allocated_regs);
3113 } else {
3114 save_globals(s, allocated_regs);
3117 tcg_out_call(s, func_addr);
3119 /* assign output registers and emit moves if needed */
3120 for (i = 0; i < nb_oargs; i++) {
3121 arg = op->args[i];
3122 ts = arg_temp(arg);
3123 reg = tcg_target_call_oarg_regs[i];
3124 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
3126 if (ts->fixed_reg) {
3127 if (ts->reg != reg) {
3128 tcg_out_mov(s, ts->type, ts->reg, reg);
3130 } else {
3131 if (ts->val_type == TEMP_VAL_REG) {
3132 s->reg_to_temp[ts->reg] = NULL;
3134 ts->val_type = TEMP_VAL_REG;
3135 ts->reg = reg;
3136 ts->mem_coherent = 0;
3137 s->reg_to_temp[reg] = ts;
3138 if (NEED_SYNC_ARG(i)) {
3139 temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
3140 } else if (IS_DEAD_ARG(i)) {
3141 temp_dead(s, ts);
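/* Example (sketch): on a host with six integer argument registers, a
   helper call with eight inputs first stores inputs 6..7 to the stack
   area, loads inputs 0..5 into tcg_target_call_iarg_regs[], frees all
   call-clobbered registers, saves or syncs globals according to the
   call flags, emits the call, and finally assigns the outputs from
   tcg_target_call_oarg_regs[]. */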
3147 #ifdef CONFIG_PROFILER
3149 /* avoid copy/paste errors */
3150 #define PROF_ADD(to, from, field) \
3151 do { \
3152 (to)->field += atomic_read(&((from)->field)); \
3153 } while (0)
3155 #define PROF_MAX(to, from, field) \
3156 do { \
3157 typeof((from)->field) val__ = atomic_read(&((from)->field)); \
3158 if (val__ > (to)->field) { \
3159 (to)->field = val__; \
3161 } while (0)
3163 /* Pass in a zeroed @prof */
3164 static inline
3165 void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
3167 unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
3168 unsigned int i;
3170 for (i = 0; i < n_ctxs; i++) {
3171 TCGContext *s = atomic_read(&tcg_ctxs[i]);
3172 const TCGProfile *orig = &s->prof;
3174 if (counters) {
3175 PROF_ADD(prof, orig, tb_count1);
3176 PROF_ADD(prof, orig, tb_count);
3177 PROF_ADD(prof, orig, op_count);
3178 PROF_MAX(prof, orig, op_count_max);
3179 PROF_ADD(prof, orig, temp_count);
3180 PROF_MAX(prof, orig, temp_count_max);
3181 PROF_ADD(prof, orig, del_op_count);
3182 PROF_ADD(prof, orig, code_in_len);
3183 PROF_ADD(prof, orig, code_out_len);
3184 PROF_ADD(prof, orig, search_out_len);
3185 PROF_ADD(prof, orig, interm_time);
3186 PROF_ADD(prof, orig, code_time);
3187 PROF_ADD(prof, orig, la_time);
3188 PROF_ADD(prof, orig, opt_time);
3189 PROF_ADD(prof, orig, restore_count);
3190 PROF_ADD(prof, orig, restore_time);
3192 if (table) {
3193 int i;
3195 for (i = 0; i < NB_OPS; i++) {
3196 PROF_ADD(prof, orig, table_op_count[i]);
3202 #undef PROF_ADD
3203 #undef PROF_MAX
3205 static void tcg_profile_snapshot_counters(TCGProfile *prof)
3207 tcg_profile_snapshot(prof, true, false);
3210 static void tcg_profile_snapshot_table(TCGProfile *prof)
3212 tcg_profile_snapshot(prof, false, true);
3215 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
3217 TCGProfile prof = {};
3218 int i;
3220 tcg_profile_snapshot_table(&prof);
3221 for (i = 0; i < NB_OPS; i++) {
3222 cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
3223 prof.table_op_count[i]);
3226 #else
3227 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
3229 cpu_fprintf(f, "[TCG profiler not compiled]\n");
3231 #endif
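/* Translate the ops accumulated in s->ops into host code for @tb:
   run the optimizer and liveness analysis (twice, if liveness_pass_2
   lowered any indirect globals), then allocate registers and emit code
   one op at a time. Returns the size of the generated code, or -1 if
   the code buffer overflowed so that the caller can flush and retry. */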
3234 int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
3236 #ifdef CONFIG_PROFILER
3237 TCGProfile *prof = &s->prof;
3238 #endif
3239 int i, num_insns;
3240 TCGOp *op;
3242 #ifdef CONFIG_PROFILER
3244 int n = 0;
3246 QTAILQ_FOREACH(op, &s->ops, link) {
3247 n++;
3249 atomic_set(&prof->op_count, prof->op_count + n);
3250 if (n > prof->op_count_max) {
3251 atomic_set(&prof->op_count_max, n);
3254 n = s->nb_temps;
3255 atomic_set(&prof->temp_count, prof->temp_count + n);
3256 if (n > prof->temp_count_max) {
3257 atomic_set(&prof->temp_count_max, n);
3260 #endif
3262 #ifdef DEBUG_DISAS
3263 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
3264 && qemu_log_in_addr_range(tb->pc))) {
3265 qemu_log_lock();
3266 qemu_log("OP:\n");
3267 tcg_dump_ops(s);
3268 qemu_log("\n");
3269 qemu_log_unlock();
3271 #endif
3273 #ifdef CONFIG_PROFILER
3274 atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
3275 #endif
3277 #ifdef USE_TCG_OPTIMIZATIONS
3278 tcg_optimize(s);
3279 #endif
3281 #ifdef CONFIG_PROFILER
3282 atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
3283 atomic_set(&prof->la_time, prof->la_time - profile_getclock());
3284 #endif
3286 liveness_pass_1(s);
3288 if (s->nb_indirects > 0) {
3289 #ifdef DEBUG_DISAS
3290 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
3291 && qemu_log_in_addr_range(tb->pc))) {
3292 qemu_log_lock();
3293 qemu_log("OP before indirect lowering:\n");
3294 tcg_dump_ops(s);
3295 qemu_log("\n");
3296 qemu_log_unlock();
3298 #endif
3299 /* Replace indirect temps with direct temps. */
3300 if (liveness_pass_2(s)) {
3301 /* If changes were made, re-run liveness. */
3302 liveness_pass_1(s);
3306 #ifdef CONFIG_PROFILER
3307 atomic_set(&prof->la_time, prof->la_time + profile_getclock());
3308 #endif
3310 #ifdef DEBUG_DISAS
3311 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
3312 && qemu_log_in_addr_range(tb->pc))) {
3313 qemu_log_lock();
3314 qemu_log("OP after optimization and liveness analysis:\n");
3315 tcg_dump_ops(s);
3316 qemu_log("\n");
3317 qemu_log_unlock();
3319 #endif
3321 tcg_reg_alloc_start(s);
3323 s->code_buf = tb->tc.ptr;
3324 s->code_ptr = tb->tc.ptr;
3326 #ifdef TCG_TARGET_NEED_LDST_LABELS
3327 s->ldst_labels = NULL;
3328 #endif
3329 #ifdef TCG_TARGET_NEED_POOL_LABELS
3330 s->pool_labels = NULL;
3331 #endif
3333 num_insns = -1;
3334 QTAILQ_FOREACH(op, &s->ops, link) {
3335 TCGOpcode opc = op->opc;
3337 #ifdef CONFIG_PROFILER
3338 atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
3339 #endif
3341 switch (opc) {
3342 case INDEX_op_mov_i32:
3343 case INDEX_op_mov_i64:
3344 case INDEX_op_mov_vec:
3345 tcg_reg_alloc_mov(s, op);
3346 break;
3347 case INDEX_op_movi_i32:
3348 case INDEX_op_movi_i64:
3349 case INDEX_op_dupi_vec:
3350 tcg_reg_alloc_movi(s, op);
3351 break;
3352 case INDEX_op_insn_start:
3353 if (num_insns >= 0) {
3354 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
3356 num_insns++;
3357 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
3358 target_ulong a;
3359 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
3360 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
3361 #else
3362 a = op->args[i];
3363 #endif
3364 s->gen_insn_data[num_insns][i] = a;
3366 break;
3367 case INDEX_op_discard:
3368 temp_dead(s, arg_temp(op->args[0]));
3369 break;
3370 case INDEX_op_set_label:
3371 tcg_reg_alloc_bb_end(s, s->reserved_regs);
3372 tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
3373 break;
3374 case INDEX_op_call:
3375 tcg_reg_alloc_call(s, op);
3376 break;
3377 default:
3378 /* Sanity check that we've not introduced any unhandled opcodes. */
3379 tcg_debug_assert(tcg_op_supported(opc));
3380 /* Note: it would be much faster to have specialized register
3381 allocator functions for some common argument patterns. */
3383 tcg_reg_alloc_op(s, op);
3384 break;
3386 #ifdef CONFIG_DEBUG_TCG
3387 check_regs(s);
3388 #endif
3389 /* Test for (pending) buffer overflow. The assumption is that any
3390 one operation beginning below the high water mark cannot overrun
3391 the buffer completely. Thus we can test for overflow after
3392 generating code without having to check during generation. */
3393 if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
3394 return -1;
3397 tcg_debug_assert(num_insns >= 0);
3398 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
3400 /* Generate TB finalization at the end of block */
3401 #ifdef TCG_TARGET_NEED_LDST_LABELS
3402 if (!tcg_out_ldst_finalize(s)) {
3403 return -1;
3405 #endif
3406 #ifdef TCG_TARGET_NEED_POOL_LABELS
3407 if (!tcg_out_pool_finalize(s)) {
3408 return -1;
3410 #endif
3412 /* flush instruction cache */
3413 flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
3415 return tcg_current_code_size(s);
3418 #ifdef CONFIG_PROFILER
3419 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
3421 TCGProfile prof = {};
3422 const TCGProfile *s;
3423 int64_t tb_count;
3424 int64_t tb_div_count;
3425 int64_t tot;
3427 tcg_profile_snapshot_counters(&prof);
3428 s = &prof;
3429 tb_count = s->tb_count;
3430 tb_div_count = tb_count ? tb_count : 1;
3431 tot = s->interm_time + s->code_time;
3433 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
3434 tot, tot / 2.4e9);
3435 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
3436 tb_count, s->tb_count1 - tb_count,
3437 (double)(s->tb_count1 - s->tb_count)
3438 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
3439 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
3440 (double)s->op_count / tb_div_count, s->op_count_max);
3441 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
3442 (double)s->del_op_count / tb_div_count);
3443 cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
3444 (double)s->temp_count / tb_div_count, s->temp_count_max);
3445 cpu_fprintf(f, "avg host code/TB %0.1f\n",
3446 (double)s->code_out_len / tb_div_count);
3447 cpu_fprintf(f, "avg search data/TB %0.1f\n",
3448 (double)s->search_out_len / tb_div_count);
3450 cpu_fprintf(f, "cycles/op %0.1f\n",
3451 s->op_count ? (double)tot / s->op_count : 0);
3452 cpu_fprintf(f, "cycles/in byte %0.1f\n",
3453 s->code_in_len ? (double)tot / s->code_in_len : 0);
3454 cpu_fprintf(f, "cycles/out byte %0.1f\n",
3455 s->code_out_len ? (double)tot / s->code_out_len : 0);
3456 cpu_fprintf(f, "cycles/search byte %0.1f\n",
3457 s->search_out_len ? (double)tot / s->search_out_len : 0);
3458 if (tot == 0) {
3459 tot = 1;
3461 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
3462 (double)s->interm_time / tot * 100.0);
3463 cpu_fprintf(f, " gen_code time %0.1f%%\n",
3464 (double)s->code_time / tot * 100.0);
3465 cpu_fprintf(f, "optim./code time %0.1f%%\n",
3466 (double)s->opt_time / (s->code_time ? s->code_time : 1)
3467 * 100.0);
3468 cpu_fprintf(f, "liveness/code time %0.1f%%\n",
3469 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
3470 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
3471 s->restore_count);
3472 cpu_fprintf(f, " avg cycles %0.1f\n",
3473 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
3475 #else
3476 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
3478 cpu_fprintf(f, "[TCG profiler not compiled]\n");
3480 #endif
3482 #ifdef ELF_HOST_MACHINE
3483 /* In order to use this feature, the backend needs to do three things:
3485 (1) Define ELF_HOST_MACHINE to indicate both what value to
3486 put into the ELF image and to indicate support for the feature.
3488 (2) Define tcg_register_jit. This should create a buffer containing
3489 the contents of a .debug_frame section that describes the post-
3490 prologue unwind info for the tcg machine.
3492 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
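/* A sketch of step (2) only (hypothetical; a real backend defines a
   larger structure carrying target-specific CFI bytes after the
   common header):

       void tcg_register_jit(void *buf, size_t buf_size)
       {
           static const DebugFrameHeader frame = { ... };
           tcg_register_jit_int(buf, buf_size, &frame, sizeof(frame));
       }
*/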
3495 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
3496 typedef enum {
3497 JIT_NOACTION = 0,
3498 JIT_REGISTER_FN,
3499 JIT_UNREGISTER_FN
3500 } jit_actions_t;
3502 struct jit_code_entry {
3503 struct jit_code_entry *next_entry;
3504 struct jit_code_entry *prev_entry;
3505 const void *symfile_addr;
3506 uint64_t symfile_size;
3509 struct jit_descriptor {
3510 uint32_t version;
3511 uint32_t action_flag;
3512 struct jit_code_entry *relevant_entry;
3513 struct jit_code_entry *first_entry;
3516 void __jit_debug_register_code(void) __attribute__((noinline));
3517 void __jit_debug_register_code(void)
3519 asm("");
3522 /* Must statically initialize the version, because GDB may check
3523 the version before we can set it. */
3524 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
3526 /* End GDB interface. */
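/* Return the offset of @str within @strtab. The caller must ensure
   that @str is present; there is no terminating bounds check. */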
3528 static int find_string(const char *strtab, const char *str)
3530 const char *p = strtab + 1;
3532 while (1) {
3533 if (strcmp(p, str) == 0) {
3534 return p - strtab;
3536 p += strlen(p) + 1;
3540 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
3541 const void *debug_frame,
3542 size_t debug_frame_size)
3544 struct __attribute__((packed)) DebugInfo {
3545 uint32_t len;
3546 uint16_t version;
3547 uint32_t abbrev;
3548 uint8_t ptr_size;
3549 uint8_t cu_die;
3550 uint16_t cu_lang;
3551 uintptr_t cu_low_pc;
3552 uintptr_t cu_high_pc;
3553 uint8_t fn_die;
3554 char fn_name[16];
3555 uintptr_t fn_low_pc;
3556 uintptr_t fn_high_pc;
3557 uint8_t cu_eoc;
3560 struct ElfImage {
3561 ElfW(Ehdr) ehdr;
3562 ElfW(Phdr) phdr;
3563 ElfW(Shdr) shdr[7];
3564 ElfW(Sym) sym[2];
3565 struct DebugInfo di;
3566 uint8_t da[24];
3567 char str[80];
3570 struct ElfImage *img;
3572 static const struct ElfImage img_template = {
3573 .ehdr = {
3574 .e_ident[EI_MAG0] = ELFMAG0,
3575 .e_ident[EI_MAG1] = ELFMAG1,
3576 .e_ident[EI_MAG2] = ELFMAG2,
3577 .e_ident[EI_MAG3] = ELFMAG3,
3578 .e_ident[EI_CLASS] = ELF_CLASS,
3579 .e_ident[EI_DATA] = ELF_DATA,
3580 .e_ident[EI_VERSION] = EV_CURRENT,
3581 .e_type = ET_EXEC,
3582 .e_machine = ELF_HOST_MACHINE,
3583 .e_version = EV_CURRENT,
3584 .e_phoff = offsetof(struct ElfImage, phdr),
3585 .e_shoff = offsetof(struct ElfImage, shdr),
3586 .e_ehsize = sizeof(ElfW(Ehdr)),
3587 .e_phentsize = sizeof(ElfW(Phdr)),
3588 .e_phnum = 1,
3589 .e_shentsize = sizeof(ElfW(Shdr)),
3590 .e_shnum = ARRAY_SIZE(img->shdr),
3591 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
3592 #ifdef ELF_HOST_FLAGS
3593 .e_flags = ELF_HOST_FLAGS,
3594 #endif
3595 #ifdef ELF_OSABI
3596 .e_ident[EI_OSABI] = ELF_OSABI,
3597 #endif
3599 .phdr = {
3600 .p_type = PT_LOAD,
3601 .p_flags = PF_X,
3603 .shdr = {
3604 [0] = { .sh_type = SHT_NULL },
3605 /* Trick: The contents of code_gen_buffer are not present in
3606 this fake ELF file; that got allocated elsewhere. Therefore
3607 we mark .text as SHT_NOBITS (similar to .bss) so that readers
3608 will not look for contents. We can record any address. */
3609 [1] = { /* .text */
3610 .sh_type = SHT_NOBITS,
3611 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
3613 [2] = { /* .debug_info */
3614 .sh_type = SHT_PROGBITS,
3615 .sh_offset = offsetof(struct ElfImage, di),
3616 .sh_size = sizeof(struct DebugInfo),
3618 [3] = { /* .debug_abbrev */
3619 .sh_type = SHT_PROGBITS,
3620 .sh_offset = offsetof(struct ElfImage, da),
3621 .sh_size = sizeof(img->da),
3623 [4] = { /* .debug_frame */
3624 .sh_type = SHT_PROGBITS,
3625 .sh_offset = sizeof(struct ElfImage),
3627 [5] = { /* .symtab */
3628 .sh_type = SHT_SYMTAB,
3629 .sh_offset = offsetof(struct ElfImage, sym),
3630 .sh_size = sizeof(img->sym),
3631 .sh_info = 1,
3632 .sh_link = ARRAY_SIZE(img->shdr) - 1,
3633 .sh_entsize = sizeof(ElfW(Sym)),
3635 [6] = { /* .strtab */
3636 .sh_type = SHT_STRTAB,
3637 .sh_offset = offsetof(struct ElfImage, str),
3638 .sh_size = sizeof(img->str),
3641 .sym = {
3642 [1] = { /* code_gen_buffer */
3643 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
3644 .st_shndx = 1,
3647 .di = {
3648 .len = sizeof(struct DebugInfo) - 4,
3649 .version = 2,
3650 .ptr_size = sizeof(void *),
3651 .cu_die = 1,
3652 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
3653 .fn_die = 2,
3654 .fn_name = "code_gen_buffer"
3656 .da = {
3657 1, /* abbrev number (the cu) */
3658 0x11, 1, /* DW_TAG_compile_unit, has children */
3659 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
3660 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
3661 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
3662 0, 0, /* end of abbrev */
3663 2, /* abbrev number (the fn) */
3664 0x2e, 0, /* DW_TAG_subprogram, no children */
3665 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
3666 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
3667 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
3668 0, 0, /* end of abbrev */
3669 0 /* no more abbrev */
3671 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
3672 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
3675 /* We only need a single jit entry; statically allocate it. */
3676 static struct jit_code_entry one_entry;
3678 uintptr_t buf = (uintptr_t)buf_ptr;
3679 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
3680 DebugFrameHeader *dfh;
3682 img = g_malloc(img_size);
3683 *img = img_template;
3685 img->phdr.p_vaddr = buf;
3686 img->phdr.p_paddr = buf;
3687 img->phdr.p_memsz = buf_size;
3689 img->shdr[1].sh_name = find_string(img->str, ".text");
3690 img->shdr[1].sh_addr = buf;
3691 img->shdr[1].sh_size = buf_size;
3693 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
3694 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
3696 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
3697 img->shdr[4].sh_size = debug_frame_size;
3699 img->shdr[5].sh_name = find_string(img->str, ".symtab");
3700 img->shdr[6].sh_name = find_string(img->str, ".strtab");
3702 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
3703 img->sym[1].st_value = buf;
3704 img->sym[1].st_size = buf_size;
3706 img->di.cu_low_pc = buf;
3707 img->di.cu_high_pc = buf + buf_size;
3708 img->di.fn_low_pc = buf;
3709 img->di.fn_high_pc = buf + buf_size;
3711 dfh = (DebugFrameHeader *)(img + 1);
3712 memcpy(dfh, debug_frame, debug_frame_size);
3713 dfh->fde.func_start = buf;
3714 dfh->fde.func_len = buf_size;
3716 #ifdef DEBUG_JIT
3717 /* Enable this block to debug the creation of the ELF image file.
3718 One can use readelf, objdump, or other inspection utilities. */
3720 FILE *f = fopen("/tmp/qemu.jit", "w+b");
3721 if (f) {
3722 if (fwrite(img, img_size, 1, f) != 1) {
3723 /* Avoid stupid unused return value warning for fwrite. */
3725 fclose(f);
3728 #endif
3730 one_entry.symfile_addr = img;
3731 one_entry.symfile_size = img_size;
3733 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
3734 __jit_debug_descriptor.relevant_entry = &one_entry;
3735 __jit_debug_descriptor.first_entry = &one_entry;
3736 __jit_debug_register_code();
3738 #else
3739 /* No support for the feature. Provide the entry point expected by exec.c,
3740 and implement the internal function we declared earlier. */
3742 static void tcg_register_jit_int(void *buf, size_t size,
3743 const void *debug_frame,
3744 size_t debug_frame_size)
3748 void tcg_register_jit(void *buf, size_t buf_size)
3751 #endif /* ELF_HOST_MACHINE */
3753 #if !TCG_TARGET_MAYBE_vec
3754 void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
3756 g_assert_not_reached();
3758 #endif