/*
 * Source provenance (from web viewer header, not part of the code):
 * qemu.git / tcg / tcg.c
 * blob 42417bdc925413420542682fff11996c38718a01
 */
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 /* define it to use liveness analysis (better code) */
26 #define USE_TCG_OPTIMIZATIONS
28 #include "qemu/osdep.h"
30 /* Define to jump the ELF file used to communicate with GDB. */
31 #undef DEBUG_JIT
33 #include "qemu/cutils.h"
34 #include "qemu/host-utils.h"
35 #include "qemu/timer.h"
37 /* Note: the long term plan is to reduce the dependencies on the QEMU
38 CPU definitions. Currently they are used for qemu_ld/st
39 instructions */
40 #define NO_CPU_IO_DEFS
41 #include "cpu.h"
43 #include "qemu/host-utils.h"
44 #include "qemu/timer.h"
45 #include "exec/cpu-common.h"
46 #include "exec/exec-all.h"
48 #include "tcg-op.h"
50 #if UINTPTR_MAX == UINT32_MAX
51 # define ELF_CLASS ELFCLASS32
52 #else
53 # define ELF_CLASS ELFCLASS64
54 #endif
55 #ifdef HOST_WORDS_BIGENDIAN
56 # define ELF_DATA ELFDATA2MSB
57 #else
58 # define ELF_DATA ELFDATA2LSB
59 #endif
61 #include "elf.h"
62 #include "exec/log.h"
64 /* Forward declarations for functions declared in tcg-target.inc.c and
65 used here. */
66 static void tcg_target_init(TCGContext *s);
67 static void tcg_target_qemu_prologue(TCGContext *s);
68 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
69 intptr_t value, intptr_t addend);
71 /* The CIE and FDE header definitions will be common to all hosts. */
72 typedef struct {
73 uint32_t len __attribute__((aligned((sizeof(void *)))));
74 uint32_t id;
75 uint8_t version;
76 char augmentation[1];
77 uint8_t code_align;
78 uint8_t data_align;
79 uint8_t return_column;
80 } DebugFrameCIE;
82 typedef struct QEMU_PACKED {
83 uint32_t len __attribute__((aligned((sizeof(void *)))));
84 uint32_t cie_offset;
85 uintptr_t func_start;
86 uintptr_t func_len;
87 } DebugFrameFDEHeader;
89 typedef struct QEMU_PACKED {
90 DebugFrameCIE cie;
91 DebugFrameFDEHeader fde;
92 } DebugFrameHeader;
94 static void tcg_register_jit_int(void *buf, size_t size,
95 const void *debug_frame,
96 size_t debug_frame_size)
97 __attribute__((unused));
99 /* Forward declarations for functions declared and used in tcg-target.inc.c. */
100 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
101 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
102 intptr_t arg2);
103 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
104 static void tcg_out_movi(TCGContext *s, TCGType type,
105 TCGReg ret, tcg_target_long arg);
106 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
107 const int *const_args);
108 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
109 intptr_t arg2);
110 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
111 TCGReg base, intptr_t ofs);
112 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
113 static int tcg_target_const_match(tcg_target_long val, TCGType type,
114 const TCGArgConstraint *arg_ct);
115 static void tcg_out_tb_init(TCGContext *s);
116 static bool tcg_out_tb_finalize(TCGContext *s);
120 static TCGRegSet tcg_target_available_regs[2];
121 static TCGRegSet tcg_target_call_clobber_regs;
123 #if TCG_TARGET_INSN_UNIT_SIZE == 1
124 static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
126 *s->code_ptr++ = v;
129 static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
130 uint8_t v)
132 *p = v;
134 #endif
136 #if TCG_TARGET_INSN_UNIT_SIZE <= 2
137 static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
139 if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
140 *s->code_ptr++ = v;
141 } else {
142 tcg_insn_unit *p = s->code_ptr;
143 memcpy(p, &v, sizeof(v));
144 s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
148 static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
149 uint16_t v)
151 if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
152 *p = v;
153 } else {
154 memcpy(p, &v, sizeof(v));
157 #endif
159 #if TCG_TARGET_INSN_UNIT_SIZE <= 4
160 static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
162 if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
163 *s->code_ptr++ = v;
164 } else {
165 tcg_insn_unit *p = s->code_ptr;
166 memcpy(p, &v, sizeof(v));
167 s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
171 static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
172 uint32_t v)
174 if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
175 *p = v;
176 } else {
177 memcpy(p, &v, sizeof(v));
180 #endif
182 #if TCG_TARGET_INSN_UNIT_SIZE <= 8
183 static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
185 if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
186 *s->code_ptr++ = v;
187 } else {
188 tcg_insn_unit *p = s->code_ptr;
189 memcpy(p, &v, sizeof(v));
190 s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
194 static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
195 uint64_t v)
197 if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
198 *p = v;
199 } else {
200 memcpy(p, &v, sizeof(v));
203 #endif
205 /* label relocation processing */
207 static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
208 TCGLabel *l, intptr_t addend)
210 TCGRelocation *r;
212 if (l->has_value) {
213 /* FIXME: This may break relocations on RISC targets that
214 modify instruction fields in place. The caller may not have
215 written the initial value. */
216 patch_reloc(code_ptr, type, l->u.value, addend);
217 } else {
218 /* add a new relocation entry */
219 r = tcg_malloc(sizeof(TCGRelocation));
220 r->type = type;
221 r->ptr = code_ptr;
222 r->addend = addend;
223 r->next = l->u.first_reloc;
224 l->u.first_reloc = r;
228 static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
230 intptr_t value = (intptr_t)ptr;
231 TCGRelocation *r;
233 tcg_debug_assert(!l->has_value);
235 for (r = l->u.first_reloc; r != NULL; r = r->next) {
236 patch_reloc(r->ptr, r->type, value, r->addend);
239 l->has_value = 1;
240 l->u.value_ptr = ptr;
243 TCGLabel *gen_new_label(void)
245 TCGContext *s = &tcg_ctx;
246 TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
248 *l = (TCGLabel){
249 .id = s->nb_labels++
252 return l;
255 #include "tcg-target.inc.c"
257 /* pool based memory allocation */
258 void *tcg_malloc_internal(TCGContext *s, int size)
260 TCGPool *p;
261 int pool_size;
263 if (size > TCG_POOL_CHUNK_SIZE) {
264 /* big malloc: insert a new pool (XXX: could optimize) */
265 p = g_malloc(sizeof(TCGPool) + size);
266 p->size = size;
267 p->next = s->pool_first_large;
268 s->pool_first_large = p;
269 return p->data;
270 } else {
271 p = s->pool_current;
272 if (!p) {
273 p = s->pool_first;
274 if (!p)
275 goto new_pool;
276 } else {
277 if (!p->next) {
278 new_pool:
279 pool_size = TCG_POOL_CHUNK_SIZE;
280 p = g_malloc(sizeof(TCGPool) + pool_size);
281 p->size = pool_size;
282 p->next = NULL;
283 if (s->pool_current)
284 s->pool_current->next = p;
285 else
286 s->pool_first = p;
287 } else {
288 p = p->next;
292 s->pool_current = p;
293 s->pool_cur = p->data + size;
294 s->pool_end = p->data + p->size;
295 return p->data;
298 void tcg_pool_reset(TCGContext *s)
300 TCGPool *p, *t;
301 for (p = s->pool_first_large; p; p = t) {
302 t = p->next;
303 g_free(p);
305 s->pool_first_large = NULL;
306 s->pool_cur = s->pool_end = NULL;
307 s->pool_current = NULL;
/* Metadata for one helper function: entry point, printable name, call
   flags, and the packed size/signedness mask of its arguments. */
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;
317 #include "exec/helper-proto.h"
319 static const TCGHelperInfo all_helpers[] = {
320 #include "exec/helper-tcg.h"
323 static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
325 void tcg_context_init(TCGContext *s)
327 int op, total_args, n, i;
328 TCGOpDef *def;
329 TCGArgConstraint *args_ct;
330 int *sorted_args;
331 GHashTable *helper_table;
333 memset(s, 0, sizeof(*s));
334 s->nb_globals = 0;
336 /* Count total number of arguments and allocate the corresponding
337 space */
338 total_args = 0;
339 for(op = 0; op < NB_OPS; op++) {
340 def = &tcg_op_defs[op];
341 n = def->nb_iargs + def->nb_oargs;
342 total_args += n;
345 args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
346 sorted_args = g_malloc(sizeof(int) * total_args);
348 for(op = 0; op < NB_OPS; op++) {
349 def = &tcg_op_defs[op];
350 def->args_ct = args_ct;
351 def->sorted_args = sorted_args;
352 n = def->nb_iargs + def->nb_oargs;
353 sorted_args += n;
354 args_ct += n;
357 /* Register helpers. */
358 /* Use g_direct_hash/equal for direct pointer comparisons on func. */
359 s->helpers = helper_table = g_hash_table_new(NULL, NULL);
361 for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
362 g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
363 (gpointer)&all_helpers[i]);
366 tcg_target_init(s);
368 /* Reverse the order of the saved registers, assuming they're all at
369 the start of tcg_target_reg_alloc_order. */
370 for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
371 int r = tcg_target_reg_alloc_order[n];
372 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
373 break;
376 for (i = 0; i < n; ++i) {
377 indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
379 for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
380 indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
384 void tcg_prologue_init(TCGContext *s)
386 size_t prologue_size, total_size;
387 void *buf0, *buf1;
389 /* Put the prologue at the beginning of code_gen_buffer. */
390 buf0 = s->code_gen_buffer;
391 s->code_ptr = buf0;
392 s->code_buf = buf0;
393 s->code_gen_prologue = buf0;
395 /* Generate the prologue. */
396 tcg_target_qemu_prologue(s);
397 buf1 = s->code_ptr;
398 flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
400 /* Deduct the prologue from the buffer. */
401 prologue_size = tcg_current_code_size(s);
402 s->code_gen_ptr = buf1;
403 s->code_gen_buffer = buf1;
404 s->code_buf = buf1;
405 total_size = s->code_gen_buffer_size - prologue_size;
406 s->code_gen_buffer_size = total_size;
408 /* Compute a high-water mark, at which we voluntarily flush the buffer
409 and start over. The size here is arbitrary, significantly larger
410 than we expect the code generation for any one opcode to require. */
411 s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);
413 tcg_register_jit(s->code_gen_buffer, total_size);
415 #ifdef DEBUG_DISAS
416 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
417 qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
418 log_disas(buf0, prologue_size);
419 qemu_log("\n");
420 qemu_log_flush();
422 #endif
425 void tcg_func_start(TCGContext *s)
427 tcg_pool_reset(s);
428 s->nb_temps = s->nb_globals;
430 /* No temps have been previously allocated for size or locality. */
431 memset(s->free_temps, 0, sizeof(s->free_temps));
433 s->nb_labels = 0;
434 s->current_frame_offset = s->frame_start;
436 #ifdef CONFIG_DEBUG_TCG
437 s->goto_tb_issue_mask = 0;
438 #endif
440 s->gen_op_buf[0].next = 1;
441 s->gen_op_buf[0].prev = 0;
442 s->gen_next_op_idx = 1;
443 s->gen_next_parm_idx = 0;
445 s->be = tcg_malloc(sizeof(TCGBackendData));
448 static inline int temp_idx(TCGContext *s, TCGTemp *ts)
450 ptrdiff_t n = ts - s->temps;
451 tcg_debug_assert(n >= 0 && n < s->nb_temps);
452 return n;
455 static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
457 int n = s->nb_temps++;
458 tcg_debug_assert(n < TCG_MAX_TEMPS);
459 return memset(&s->temps[n], 0, sizeof(TCGTemp));
462 static inline TCGTemp *tcg_global_alloc(TCGContext *s)
464 tcg_debug_assert(s->nb_globals == s->nb_temps);
465 s->nb_globals++;
466 return tcg_temp_alloc(s);
469 static int tcg_global_reg_new_internal(TCGContext *s, TCGType type,
470 TCGReg reg, const char *name)
472 TCGTemp *ts;
474 if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
475 tcg_abort();
478 ts = tcg_global_alloc(s);
479 ts->base_type = type;
480 ts->type = type;
481 ts->fixed_reg = 1;
482 ts->reg = reg;
483 ts->name = name;
484 tcg_regset_set_reg(s->reserved_regs, reg);
486 return temp_idx(s, ts);
489 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
491 int idx;
492 s->frame_start = start;
493 s->frame_end = start + size;
494 idx = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
495 s->frame_temp = &s->temps[idx];
498 TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name)
500 TCGContext *s = &tcg_ctx;
501 int idx;
503 if (tcg_regset_test_reg(s->reserved_regs, reg)) {
504 tcg_abort();
506 idx = tcg_global_reg_new_internal(s, TCG_TYPE_I32, reg, name);
507 return MAKE_TCGV_I32(idx);
510 TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name)
512 TCGContext *s = &tcg_ctx;
513 int idx;
515 if (tcg_regset_test_reg(s->reserved_regs, reg)) {
516 tcg_abort();
518 idx = tcg_global_reg_new_internal(s, TCG_TYPE_I64, reg, name);
519 return MAKE_TCGV_I64(idx);
522 int tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
523 intptr_t offset, const char *name)
525 TCGContext *s = &tcg_ctx;
526 TCGTemp *base_ts = &s->temps[GET_TCGV_PTR(base)];
527 TCGTemp *ts = tcg_global_alloc(s);
528 int indirect_reg = 0, bigendian = 0;
529 #ifdef HOST_WORDS_BIGENDIAN
530 bigendian = 1;
531 #endif
533 if (!base_ts->fixed_reg) {
534 /* We do not support double-indirect registers. */
535 tcg_debug_assert(!base_ts->indirect_reg);
536 base_ts->indirect_base = 1;
537 s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
538 ? 2 : 1);
539 indirect_reg = 1;
542 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
543 TCGTemp *ts2 = tcg_global_alloc(s);
544 char buf[64];
546 ts->base_type = TCG_TYPE_I64;
547 ts->type = TCG_TYPE_I32;
548 ts->indirect_reg = indirect_reg;
549 ts->mem_allocated = 1;
550 ts->mem_base = base_ts;
551 ts->mem_offset = offset + bigendian * 4;
552 pstrcpy(buf, sizeof(buf), name);
553 pstrcat(buf, sizeof(buf), "_0");
554 ts->name = strdup(buf);
556 tcg_debug_assert(ts2 == ts + 1);
557 ts2->base_type = TCG_TYPE_I64;
558 ts2->type = TCG_TYPE_I32;
559 ts2->indirect_reg = indirect_reg;
560 ts2->mem_allocated = 1;
561 ts2->mem_base = base_ts;
562 ts2->mem_offset = offset + (1 - bigendian) * 4;
563 pstrcpy(buf, sizeof(buf), name);
564 pstrcat(buf, sizeof(buf), "_1");
565 ts2->name = strdup(buf);
566 } else {
567 ts->base_type = type;
568 ts->type = type;
569 ts->indirect_reg = indirect_reg;
570 ts->mem_allocated = 1;
571 ts->mem_base = base_ts;
572 ts->mem_offset = offset;
573 ts->name = name;
575 return temp_idx(s, ts);
578 static int tcg_temp_new_internal(TCGType type, int temp_local)
580 TCGContext *s = &tcg_ctx;
581 TCGTemp *ts;
582 int idx, k;
584 k = type + (temp_local ? TCG_TYPE_COUNT : 0);
585 idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
586 if (idx < TCG_MAX_TEMPS) {
587 /* There is already an available temp with the right type. */
588 clear_bit(idx, s->free_temps[k].l);
590 ts = &s->temps[idx];
591 ts->temp_allocated = 1;
592 tcg_debug_assert(ts->base_type == type);
593 tcg_debug_assert(ts->temp_local == temp_local);
594 } else {
595 ts = tcg_temp_alloc(s);
596 if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
597 TCGTemp *ts2 = tcg_temp_alloc(s);
599 ts->base_type = type;
600 ts->type = TCG_TYPE_I32;
601 ts->temp_allocated = 1;
602 ts->temp_local = temp_local;
604 tcg_debug_assert(ts2 == ts + 1);
605 ts2->base_type = TCG_TYPE_I64;
606 ts2->type = TCG_TYPE_I32;
607 ts2->temp_allocated = 1;
608 ts2->temp_local = temp_local;
609 } else {
610 ts->base_type = type;
611 ts->type = type;
612 ts->temp_allocated = 1;
613 ts->temp_local = temp_local;
615 idx = temp_idx(s, ts);
618 #if defined(CONFIG_DEBUG_TCG)
619 s->temps_in_use++;
620 #endif
621 return idx;
624 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
626 int idx;
628 idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
629 return MAKE_TCGV_I32(idx);
632 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
634 int idx;
636 idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
637 return MAKE_TCGV_I64(idx);
640 static void tcg_temp_free_internal(int idx)
642 TCGContext *s = &tcg_ctx;
643 TCGTemp *ts;
644 int k;
646 #if defined(CONFIG_DEBUG_TCG)
647 s->temps_in_use--;
648 if (s->temps_in_use < 0) {
649 fprintf(stderr, "More temporaries freed than allocated!\n");
651 #endif
653 tcg_debug_assert(idx >= s->nb_globals && idx < s->nb_temps);
654 ts = &s->temps[idx];
655 tcg_debug_assert(ts->temp_allocated != 0);
656 ts->temp_allocated = 0;
658 k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
659 set_bit(idx, s->free_temps[k].l);
662 void tcg_temp_free_i32(TCGv_i32 arg)
664 tcg_temp_free_internal(GET_TCGV_I32(arg));
667 void tcg_temp_free_i64(TCGv_i64 arg)
669 tcg_temp_free_internal(GET_TCGV_I64(arg));
672 TCGv_i32 tcg_const_i32(int32_t val)
674 TCGv_i32 t0;
675 t0 = tcg_temp_new_i32();
676 tcg_gen_movi_i32(t0, val);
677 return t0;
680 TCGv_i64 tcg_const_i64(int64_t val)
682 TCGv_i64 t0;
683 t0 = tcg_temp_new_i64();
684 tcg_gen_movi_i64(t0, val);
685 return t0;
688 TCGv_i32 tcg_const_local_i32(int32_t val)
690 TCGv_i32 t0;
691 t0 = tcg_temp_local_new_i32();
692 tcg_gen_movi_i32(t0, val);
693 return t0;
696 TCGv_i64 tcg_const_local_i64(int64_t val)
698 TCGv_i64 t0;
699 t0 = tcg_temp_local_new_i64();
700 tcg_gen_movi_i64(t0, val);
701 return t0;
#if defined(CONFIG_DEBUG_TCG)
/* Debug-only leak detection for temps across a translation unit. */
void tcg_clear_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    s->temps_in_use = 0;
}

/* Returns 1 (and resets the count) if any temp was left allocated. */
int tcg_check_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
725 /* Note: we convert the 64 bit args to 32 bit and do some alignment
726 and endian swap. Maybe it would be better to do the alignment
727 and endian swap in tcg_reg_alloc_call(). */
728 void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
729 int nargs, TCGArg *args)
731 int i, real_args, nb_rets, pi, pi_first;
732 unsigned sizemask, flags;
733 TCGHelperInfo *info;
735 info = g_hash_table_lookup(s->helpers, (gpointer)func);
736 flags = info->flags;
737 sizemask = info->sizemask;
739 #if defined(__sparc__) && !defined(__arch64__) \
740 && !defined(CONFIG_TCG_INTERPRETER)
741 /* We have 64-bit values in one register, but need to pass as two
742 separate parameters. Split them. */
743 int orig_sizemask = sizemask;
744 int orig_nargs = nargs;
745 TCGv_i64 retl, reth;
747 TCGV_UNUSED_I64(retl);
748 TCGV_UNUSED_I64(reth);
749 if (sizemask != 0) {
750 TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2);
751 for (i = real_args = 0; i < nargs; ++i) {
752 int is_64bit = sizemask & (1 << (i+1)*2);
753 if (is_64bit) {
754 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
755 TCGv_i32 h = tcg_temp_new_i32();
756 TCGv_i32 l = tcg_temp_new_i32();
757 tcg_gen_extr_i64_i32(l, h, orig);
758 split_args[real_args++] = GET_TCGV_I32(h);
759 split_args[real_args++] = GET_TCGV_I32(l);
760 } else {
761 split_args[real_args++] = args[i];
764 nargs = real_args;
765 args = split_args;
766 sizemask = 0;
768 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
769 for (i = 0; i < nargs; ++i) {
770 int is_64bit = sizemask & (1 << (i+1)*2);
771 int is_signed = sizemask & (2 << (i+1)*2);
772 if (!is_64bit) {
773 TCGv_i64 temp = tcg_temp_new_i64();
774 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
775 if (is_signed) {
776 tcg_gen_ext32s_i64(temp, orig);
777 } else {
778 tcg_gen_ext32u_i64(temp, orig);
780 args[i] = GET_TCGV_I64(temp);
783 #endif /* TCG_TARGET_EXTEND_ARGS */
785 pi_first = pi = s->gen_next_parm_idx;
786 if (ret != TCG_CALL_DUMMY_ARG) {
787 #if defined(__sparc__) && !defined(__arch64__) \
788 && !defined(CONFIG_TCG_INTERPRETER)
789 if (orig_sizemask & 1) {
790 /* The 32-bit ABI is going to return the 64-bit value in
791 the %o0/%o1 register pair. Prepare for this by using
792 two return temporaries, and reassemble below. */
793 retl = tcg_temp_new_i64();
794 reth = tcg_temp_new_i64();
795 s->gen_opparam_buf[pi++] = GET_TCGV_I64(reth);
796 s->gen_opparam_buf[pi++] = GET_TCGV_I64(retl);
797 nb_rets = 2;
798 } else {
799 s->gen_opparam_buf[pi++] = ret;
800 nb_rets = 1;
802 #else
803 if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
804 #ifdef HOST_WORDS_BIGENDIAN
805 s->gen_opparam_buf[pi++] = ret + 1;
806 s->gen_opparam_buf[pi++] = ret;
807 #else
808 s->gen_opparam_buf[pi++] = ret;
809 s->gen_opparam_buf[pi++] = ret + 1;
810 #endif
811 nb_rets = 2;
812 } else {
813 s->gen_opparam_buf[pi++] = ret;
814 nb_rets = 1;
816 #endif
817 } else {
818 nb_rets = 0;
820 real_args = 0;
821 for (i = 0; i < nargs; i++) {
822 int is_64bit = sizemask & (1 << (i+1)*2);
823 if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
824 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
825 /* some targets want aligned 64 bit args */
826 if (real_args & 1) {
827 s->gen_opparam_buf[pi++] = TCG_CALL_DUMMY_ARG;
828 real_args++;
830 #endif
831 /* If stack grows up, then we will be placing successive
832 arguments at lower addresses, which means we need to
833 reverse the order compared to how we would normally
834 treat either big or little-endian. For those arguments
835 that will wind up in registers, this still works for
836 HPPA (the only current STACK_GROWSUP target) since the
837 argument registers are *also* allocated in decreasing
838 order. If another such target is added, this logic may
839 have to get more complicated to differentiate between
840 stack arguments and register arguments. */
841 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
842 s->gen_opparam_buf[pi++] = args[i] + 1;
843 s->gen_opparam_buf[pi++] = args[i];
844 #else
845 s->gen_opparam_buf[pi++] = args[i];
846 s->gen_opparam_buf[pi++] = args[i] + 1;
847 #endif
848 real_args += 2;
849 continue;
852 s->gen_opparam_buf[pi++] = args[i];
853 real_args++;
855 s->gen_opparam_buf[pi++] = (uintptr_t)func;
856 s->gen_opparam_buf[pi++] = flags;
858 i = s->gen_next_op_idx;
859 tcg_debug_assert(i < OPC_BUF_SIZE);
860 tcg_debug_assert(pi <= OPPARAM_BUF_SIZE);
862 /* Set links for sequential allocation during translation. */
863 s->gen_op_buf[i] = (TCGOp){
864 .opc = INDEX_op_call,
865 .callo = nb_rets,
866 .calli = real_args,
867 .args = pi_first,
868 .prev = i - 1,
869 .next = i + 1
872 /* Make sure the calli field didn't overflow. */
873 tcg_debug_assert(s->gen_op_buf[i].calli == real_args);
875 s->gen_op_buf[0].prev = i;
876 s->gen_next_op_idx = i + 1;
877 s->gen_next_parm_idx = pi;
879 #if defined(__sparc__) && !defined(__arch64__) \
880 && !defined(CONFIG_TCG_INTERPRETER)
881 /* Free all of the parts we allocated above. */
882 for (i = real_args = 0; i < orig_nargs; ++i) {
883 int is_64bit = orig_sizemask & (1 << (i+1)*2);
884 if (is_64bit) {
885 TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
886 TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
887 tcg_temp_free_i32(h);
888 tcg_temp_free_i32(l);
889 } else {
890 real_args++;
893 if (orig_sizemask & 1) {
894 /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
895 Note that describing these as TCGv_i64 eliminates an unnecessary
896 zero-extension that tcg_gen_concat_i32_i64 would create. */
897 tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
898 tcg_temp_free_i64(retl);
899 tcg_temp_free_i64(reth);
901 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
902 for (i = 0; i < nargs; ++i) {
903 int is_64bit = sizemask & (1 << (i+1)*2);
904 if (!is_64bit) {
905 TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
906 tcg_temp_free_i64(temp);
909 #endif /* TCG_TARGET_EXTEND_ARGS */
912 static void tcg_reg_alloc_start(TCGContext *s)
914 int i;
915 TCGTemp *ts;
916 for(i = 0; i < s->nb_globals; i++) {
917 ts = &s->temps[i];
918 if (ts->fixed_reg) {
919 ts->val_type = TEMP_VAL_REG;
920 } else {
921 ts->val_type = TEMP_VAL_MEM;
924 for(i = s->nb_globals; i < s->nb_temps; i++) {
925 ts = &s->temps[i];
926 if (ts->temp_local) {
927 ts->val_type = TEMP_VAL_MEM;
928 } else {
929 ts->val_type = TEMP_VAL_DEAD;
931 ts->mem_allocated = 0;
932 ts->fixed_reg = 0;
935 memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
938 static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
939 TCGTemp *ts)
941 int idx = temp_idx(s, ts);
943 if (idx < s->nb_globals) {
944 pstrcpy(buf, buf_size, ts->name);
945 } else if (ts->temp_local) {
946 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
947 } else {
948 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
950 return buf;
953 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf,
954 int buf_size, int idx)
956 tcg_debug_assert(idx >= 0 && idx < s->nb_temps);
957 return tcg_get_arg_str_ptr(s, buf, buf_size, &s->temps[idx]);
960 /* Find helper name. */
961 static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
963 const char *ret = NULL;
964 if (s->helpers) {
965 TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val);
966 if (info) {
967 ret = info->name;
970 return ret;
973 static const char * const cond_name[] =
975 [TCG_COND_NEVER] = "never",
976 [TCG_COND_ALWAYS] = "always",
977 [TCG_COND_EQ] = "eq",
978 [TCG_COND_NE] = "ne",
979 [TCG_COND_LT] = "lt",
980 [TCG_COND_GE] = "ge",
981 [TCG_COND_LE] = "le",
982 [TCG_COND_GT] = "gt",
983 [TCG_COND_LTU] = "ltu",
984 [TCG_COND_GEU] = "geu",
985 [TCG_COND_LEU] = "leu",
986 [TCG_COND_GTU] = "gtu"
989 static const char * const ldst_name[] =
991 [MO_UB] = "ub",
992 [MO_SB] = "sb",
993 [MO_LEUW] = "leuw",
994 [MO_LESW] = "lesw",
995 [MO_LEUL] = "leul",
996 [MO_LESL] = "lesl",
997 [MO_LEQ] = "leq",
998 [MO_BEUW] = "beuw",
999 [MO_BESW] = "besw",
1000 [MO_BEUL] = "beul",
1001 [MO_BESL] = "besl",
1002 [MO_BEQ] = "beq",
1005 static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
1006 #ifdef ALIGNED_ONLY
1007 [MO_UNALN >> MO_ASHIFT] = "un+",
1008 [MO_ALIGN >> MO_ASHIFT] = "",
1009 #else
1010 [MO_UNALN >> MO_ASHIFT] = "",
1011 [MO_ALIGN >> MO_ASHIFT] = "al+",
1012 #endif
1013 [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
1014 [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
1015 [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
1016 [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
1017 [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
1018 [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
1021 void tcg_dump_ops(TCGContext *s)
1023 char buf[128];
1024 TCGOp *op;
1025 int oi;
1027 for (oi = s->gen_op_buf[0].next; oi != 0; oi = op->next) {
1028 int i, k, nb_oargs, nb_iargs, nb_cargs;
1029 const TCGOpDef *def;
1030 const TCGArg *args;
1031 TCGOpcode c;
1032 int col = 0;
1034 op = &s->gen_op_buf[oi];
1035 c = op->opc;
1036 def = &tcg_op_defs[c];
1037 args = &s->gen_opparam_buf[op->args];
1039 if (c == INDEX_op_insn_start) {
1040 col += qemu_log("%s ----", oi != s->gen_op_buf[0].next ? "\n" : "");
1042 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1043 target_ulong a;
1044 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
1045 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
1046 #else
1047 a = args[i];
1048 #endif
1049 col += qemu_log(" " TARGET_FMT_lx, a);
1051 } else if (c == INDEX_op_call) {
1052 /* variable number of arguments */
1053 nb_oargs = op->callo;
1054 nb_iargs = op->calli;
1055 nb_cargs = def->nb_cargs;
1057 /* function name, flags, out args */
1058 col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
1059 tcg_find_helper(s, args[nb_oargs + nb_iargs]),
1060 args[nb_oargs + nb_iargs + 1], nb_oargs);
1061 for (i = 0; i < nb_oargs; i++) {
1062 col += qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1063 args[i]));
1065 for (i = 0; i < nb_iargs; i++) {
1066 TCGArg arg = args[nb_oargs + i];
1067 const char *t = "<dummy>";
1068 if (arg != TCG_CALL_DUMMY_ARG) {
1069 t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
1071 col += qemu_log(",%s", t);
1073 } else {
1074 col += qemu_log(" %s ", def->name);
1076 nb_oargs = def->nb_oargs;
1077 nb_iargs = def->nb_iargs;
1078 nb_cargs = def->nb_cargs;
1080 k = 0;
1081 for (i = 0; i < nb_oargs; i++) {
1082 if (k != 0) {
1083 col += qemu_log(",");
1085 col += qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1086 args[k++]));
1088 for (i = 0; i < nb_iargs; i++) {
1089 if (k != 0) {
1090 col += qemu_log(",");
1092 col += qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1093 args[k++]));
1095 switch (c) {
1096 case INDEX_op_brcond_i32:
1097 case INDEX_op_setcond_i32:
1098 case INDEX_op_movcond_i32:
1099 case INDEX_op_brcond2_i32:
1100 case INDEX_op_setcond2_i32:
1101 case INDEX_op_brcond_i64:
1102 case INDEX_op_setcond_i64:
1103 case INDEX_op_movcond_i64:
1104 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
1105 col += qemu_log(",%s", cond_name[args[k++]]);
1106 } else {
1107 col += qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1109 i = 1;
1110 break;
1111 case INDEX_op_qemu_ld_i32:
1112 case INDEX_op_qemu_st_i32:
1113 case INDEX_op_qemu_ld_i64:
1114 case INDEX_op_qemu_st_i64:
1116 TCGMemOpIdx oi = args[k++];
1117 TCGMemOp op = get_memop(oi);
1118 unsigned ix = get_mmuidx(oi);
1120 if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
1121 col += qemu_log(",$0x%x,%u", op, ix);
1122 } else {
1123 const char *s_al, *s_op;
1124 s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
1125 s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
1126 col += qemu_log(",%s%s,%u", s_al, s_op, ix);
1128 i = 1;
1130 break;
1131 default:
1132 i = 0;
1133 break;
1135 switch (c) {
1136 case INDEX_op_set_label:
1137 case INDEX_op_br:
1138 case INDEX_op_brcond_i32:
1139 case INDEX_op_brcond_i64:
1140 case INDEX_op_brcond2_i32:
1141 col += qemu_log("%s$L%d", k ? "," : "", arg_label(args[k])->id);
1142 i++, k++;
1143 break;
1144 default:
1145 break;
1147 for (; i < nb_cargs; i++, k++) {
1148 col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", args[k]);
1151 if (op->life) {
1152 unsigned life = op->life;
1154 for (; col < 48; ++col) {
1155 putc(' ', qemu_logfile);
1158 if (life & (SYNC_ARG * 3)) {
1159 qemu_log(" sync:");
1160 for (i = 0; i < 2; ++i) {
1161 if (life & (SYNC_ARG << i)) {
1162 qemu_log(" %d", i);
1166 life /= DEAD_ARG;
1167 if (life) {
1168 qemu_log(" dead:");
1169 for (i = 0; life; ++i, life >>= 1) {
1170 if (life & 1) {
1171 qemu_log(" %d", i);
1176 qemu_log("\n");
1180 /* we give more priority to constraints with less registers */
1181 static int get_constraint_priority(const TCGOpDef *def, int k)
1183 const TCGArgConstraint *arg_ct;
1185 int i, n;
1186 arg_ct = &def->args_ct[k];
1187 if (arg_ct->ct & TCG_CT_ALIAS) {
1188 /* an alias is equivalent to a single register */
1189 n = 1;
1190 } else {
1191 if (!(arg_ct->ct & TCG_CT_REG))
1192 return 0;
1193 n = 0;
1194 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1195 if (tcg_regset_test_reg(arg_ct->u.regs, i))
1196 n++;
1199 return TCG_TARGET_NB_REGS - n + 1;
1202 /* sort from highest priority to lowest */
1203 static void sort_constraints(TCGOpDef *def, int start, int n)
1205 int i, j, p1, p2, tmp;
1207 for(i = 0; i < n; i++)
1208 def->sorted_args[start + i] = start + i;
1209 if (n <= 1)
1210 return;
1211 for(i = 0; i < n - 1; i++) {
1212 for(j = i + 1; j < n; j++) {
1213 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1214 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1215 if (p1 < p2) {
1216 tmp = def->sorted_args[start + i];
1217 def->sorted_args[start + i] = def->sorted_args[start + j];
1218 def->sorted_args[start + j] = tmp;
/* Merge the backend-provided constraint strings TDEFS (terminated by an
   entry with op == -1) into the global tcg_op_defs table: parse each
   per-argument constraint string, resolve output aliases, and sort the
   arguments by constraint priority.  Exits the process on a malformed
   constraint.  Under CONFIG_DEBUG_TCG, additionally cross-checks that
   every present opcode got exactly one definition.  */
void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
{
    TCGOpcode op;
    TCGOpDef *def;
    const char *ct_str;
    int i, nb_args;

    for(;;) {
        if (tdefs->op == (TCGOpcode)-1)
            break;
        op = tdefs->op;
        tcg_debug_assert((unsigned)op < NB_OPS);
        def = &tcg_op_defs[op];
#if defined(CONFIG_DEBUG_TCG)
        /* Duplicate entry in op definitions? */
        tcg_debug_assert(!def->used);
        def->used = 1;
#endif
        nb_args = def->nb_iargs + def->nb_oargs;
        for(i = 0; i < nb_args; i++) {
            ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry? */
            tcg_debug_assert(ct_str != NULL);
            tcg_regset_clear(def->args_ct[i].u.regs);
            def->args_ct[i].ct = 0;
            if (ct_str[0] >= '0' && ct_str[0] <= '9') {
                /* A digit names the output argument this input aliases.  */
                int oarg;
                oarg = ct_str[0] - '0';
                tcg_debug_assert(oarg < def->nb_oargs);
                tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                /* TCG_CT_ALIAS is for the output arguments. The input
                   argument is tagged with TCG_CT_IALIAS. */
                def->args_ct[i] = def->args_ct[oarg];
                def->args_ct[oarg].ct = TCG_CT_ALIAS;
                def->args_ct[oarg].alias_index = i;
                def->args_ct[i].ct |= TCG_CT_IALIAS;
                def->args_ct[i].alias_index = oarg;
            } else {
                /* Otherwise consume constraint letters one at a time.  */
                for(;;) {
                    if (*ct_str == '\0')
                        break;
                    switch(*ct_str) {
                    case 'i':
                        def->args_ct[i].ct |= TCG_CT_CONST;
                        ct_str++;
                        break;
                    default:
                        /* Backend-specific letter; the backend advances
                           ct_str itself on success.  */
                        if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
                            fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
                                    ct_str, i, def->name);
                            exit(1);
                        }
                    }
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just an heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);

#if 0
        {
            int i;

            printf("%s: sorted=", def->name);
            for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
                printf(" %d", def->sorted_args[i]);
            printf("\n");
        }
#endif
        tdefs++;
    }

#if defined(CONFIG_DEBUG_TCG)
    i = 0;
    for (op = 0; op < tcg_op_defs_max; op++) {
        const TCGOpDef *def = &tcg_op_defs[op];
        if (def->flags & TCG_OPF_NOT_PRESENT) {
            /* Wrong entry in op definitions? */
            if (def->used) {
                fprintf(stderr, "Invalid op definition for %s\n", def->name);
                i = 1;
            }
        } else {
            /* Missing entry in op definitions? */
            if (!def->used) {
                fprintf(stderr, "Missing op definition for %s\n", def->name);
                i = 1;
            }
        }
    }
    /* Abort only after reporting every inconsistency.  */
    if (i == 1) {
        tcg_abort();
    }
#endif
}
1325 void tcg_op_remove(TCGContext *s, TCGOp *op)
1327 int next = op->next;
1328 int prev = op->prev;
1330 /* We should never attempt to remove the list terminator. */
1331 tcg_debug_assert(op != &s->gen_op_buf[0]);
1333 s->gen_op_buf[next].prev = prev;
1334 s->gen_op_buf[prev].next = next;
1336 memset(op, 0, sizeof(*op));
1338 #ifdef CONFIG_PROFILER
1339 s->del_op_count++;
1340 #endif
1343 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
1344 TCGOpcode opc, int nargs)
1346 int oi = s->gen_next_op_idx;
1347 int pi = s->gen_next_parm_idx;
1348 int prev = old_op->prev;
1349 int next = old_op - s->gen_op_buf;
1350 TCGOp *new_op;
1352 tcg_debug_assert(oi < OPC_BUF_SIZE);
1353 tcg_debug_assert(pi + nargs <= OPPARAM_BUF_SIZE);
1354 s->gen_next_op_idx = oi + 1;
1355 s->gen_next_parm_idx = pi + nargs;
1357 new_op = &s->gen_op_buf[oi];
1358 *new_op = (TCGOp){
1359 .opc = opc,
1360 .args = pi,
1361 .prev = prev,
1362 .next = next
1364 s->gen_op_buf[prev].next = oi;
1365 old_op->prev = oi;
1367 return new_op;
1370 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
1371 TCGOpcode opc, int nargs)
1373 int oi = s->gen_next_op_idx;
1374 int pi = s->gen_next_parm_idx;
1375 int prev = old_op - s->gen_op_buf;
1376 int next = old_op->next;
1377 TCGOp *new_op;
1379 tcg_debug_assert(oi < OPC_BUF_SIZE);
1380 tcg_debug_assert(pi + nargs <= OPPARAM_BUF_SIZE);
1381 s->gen_next_op_idx = oi + 1;
1382 s->gen_next_parm_idx = pi + nargs;
1384 new_op = &s->gen_op_buf[oi];
1385 *new_op = (TCGOp){
1386 .opc = opc,
1387 .args = pi,
1388 .prev = prev,
1389 .next = next
1391 s->gen_op_buf[next].prev = oi;
1392 old_op->next = oi;
1394 return new_op;
/* Per-temp liveness state bits used by the liveness passes below:
   TS_DEAD: the temp's value is not needed by any later op;
   TS_MEM:  the temp's canonical memory slot holds (or must come to
            hold) the current value.  */
#define TS_DEAD  1
#define TS_MEM   2

/* Extract per-argument life bits from an op's arg_life word
   (arg_life must be in scope at the use site).  */
#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
1403 /* liveness analysis: end of function: all temps are dead, and globals
1404 should be in memory. */
1405 static inline void tcg_la_func_end(TCGContext *s, uint8_t *temp_state)
1407 memset(temp_state, TS_DEAD | TS_MEM, s->nb_globals);
1408 memset(temp_state + s->nb_globals, TS_DEAD, s->nb_temps - s->nb_globals);
1411 /* liveness analysis: end of basic block: all temps are dead, globals
1412 and local temps should be in memory. */
1413 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *temp_state)
1415 int i, n;
1417 tcg_la_func_end(s, temp_state);
1418 for (i = s->nb_globals, n = s->nb_temps; i < n; i++) {
1419 if (s->temps[i].temp_local) {
1420 temp_state[i] |= TS_MEM;
/* Liveness analysis : update the opc_arg_life array to tell if a
   given input arguments is dead. Instructions updating dead
   temporaries are removed.

   Walks the op list BACKWARDS (from gen_op_buf[0].prev), maintaining
   per-temp TS_DEAD/TS_MEM state in TEMP_STATE, and records per-op
   DEAD_ARG/SYNC_ARG bits into op->life.  Dead pure calls and dead
   double-word arithmetic are deleted or narrowed in place.  */
static void liveness_pass_1(TCGContext *s, uint8_t *temp_state)
{
    int nb_globals = s->nb_globals;
    int oi, oi_prev;

    /* At the end of the function everything is dead / in memory.  */
    tcg_la_func_end(s, temp_state);

    for (oi = s->gen_op_buf[0].prev; oi != 0; oi = oi_prev) {
        int i, nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGArg arg;

        TCGOp * const op = &s->gen_op_buf[oi];
        TCGArg * const args = &s->gen_opparam_buf[op->args];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        /* Read the link before the op is possibly removed below.  */
        oi_prev = op->prev;

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;

                nb_oargs = op->callo;
                nb_iargs = op->calli;
                call_flags = args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        arg = args[i];
                        if (temp_state[arg] != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                } else {
                do_not_remove_call:

                    /* output args are dead */
                    for (i = 0; i < nb_oargs; i++) {
                        arg = args[i];
                        if (temp_state[arg] & TS_DEAD) {
                            arg_life |= DEAD_ARG << i;
                        }
                        if (temp_state[arg] & TS_MEM) {
                            arg_life |= SYNC_ARG << i;
                        }
                        temp_state[arg] = TS_DEAD;
                    }

                    if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                        TCG_CALL_NO_READ_GLOBALS))) {
                        /* globals should go back to memory */
                        memset(temp_state, TS_DEAD | TS_MEM, nb_globals);
                    } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                        /* globals should be synced to memory */
                        for (i = 0; i < nb_globals; i++) {
                            temp_state[i] |= TS_MEM;
                        }
                    }

                    /* record arguments that die in this helper */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg = args[i];
                        if (arg != TCG_CALL_DUMMY_ARG) {
                            if (temp_state[arg] & TS_DEAD) {
                                arg_life |= DEAD_ARG << i;
                            }
                        }
                    }
                    /* input arguments are live for preceding opcodes */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg = args[i];
                        if (arg != TCG_CALL_DUMMY_ARG) {
                            temp_state[arg] &= ~TS_DEAD;
                        }
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            temp_state[args[0]] = TS_DEAD;
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part. The result can be optimized to a simple
               add or sub. This happens often for x86_64 guest when the
               cpu mode is set to 32 bit. */
            if (temp_state[args[1]] == TS_DEAD) {
                if (temp_state[args[0]] == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end. */
                op->opc = opc = opc_new;
                args[1] = args[2];
                args[2] = args[4];
                /* Fall through and mark the single-word operation live. */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (temp_state[args[1]] == TS_DEAD) {
                if (temp_state[args[0]] == TS_DEAD) {
                    /* Both parts of the operation are dead. */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                args[1] = args[2];
                args[2] = args[3];
            } else if (temp_state[args[0]] == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                args[0] = args[1];
                args[1] = args[2];
                args[2] = args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live. */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (temp_state[args[i]] != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
            do_remove:
                tcg_op_remove(s, op);
            } else {
            do_not_remove:
                /* output args are dead */
                for (i = 0; i < nb_oargs; i++) {
                    arg = args[i];
                    if (temp_state[arg] & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (temp_state[arg] & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    temp_state[arg] = TS_DEAD;
                }

                /* if end of basic block, update */
                if (def->flags & TCG_OPF_BB_END) {
                    tcg_la_bb_end(s, temp_state);
                } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                    /* globals should be synced to memory */
                    for (i = 0; i < nb_globals; i++) {
                        temp_state[i] |= TS_MEM;
                    }
                }

                /* record arguments that die in this opcode */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg = args[i];
                    if (temp_state[arg] & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }
                /* input arguments are live for preceding opcodes */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    temp_state[args[i]] &= ~TS_DEAD;
                }
            }
            break;
        }
        op->life = arg_life;
    }
}
/* Liveness analysis: Convert indirect regs to direct temporaries.

   For every global marked indirect_reg, allocate a shadow "direct"
   temp, then walk the op list FORWARDS rewriting uses of the global to
   the shadow temp, inserting loads before first use and stores after
   last write as dictated by the life bits computed by pass 1.
   Returns true if anything was rewritten (callers then need another
   liveness pass).  */
static bool liveness_pass_2(TCGContext *s, uint8_t *temp_state)
{
    int nb_globals = s->nb_globals;
    int16_t *dir_temps;
    int i, oi, oi_next;
    bool changes = false;

    /* dir_temps[g] == 0 means global g has no shadow temp.  */
    dir_temps = tcg_malloc(nb_globals * sizeof(int16_t));
    memset(dir_temps, 0, nb_globals * sizeof(int16_t));

    /* Create a temporary for each indirect global. */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            dir_temps[i] = temp_idx(s, dts);
        }
    }

    /* All shadow temps start out unloaded.  */
    memset(temp_state, TS_DEAD, nb_globals);

    for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
        TCGOp *op = &s->gen_op_buf[oi];
        TCGArg *args = &s->gen_opparam_buf[op->args];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGArg arg, dir;

        oi_next = op->next;

        if (opc == INDEX_op_call) {
            nb_oargs = op->callo;
            nb_iargs = op->calli;
            call_flags = args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require. */
            if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals. */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg = args[i];
            /* Note this unsigned test catches TCG_CALL_ARG_DUMMY too. */
            if (arg < nb_globals) {
                dir = dir_temps[arg];
                if (dir != 0 && temp_state[arg] == TS_DEAD) {
                    /* Insert a load of the global into its shadow
                       before this op.  */
                    TCGTemp *its = &s->temps[arg];
                    TCGOpcode lopc = (its->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
                    TCGArg *largs = &s->gen_opparam_buf[lop->args];

                    largs[0] = dir;
                    largs[1] = temp_idx(s, its->mem_base);
                    largs[2] = its->mem_offset;

                    /* Loaded, but synced with memory. */
                    temp_state[arg] = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg = args[i];
            if (arg < nb_globals) {
                dir = dir_temps[arg];
                if (dir != 0) {
                    args[i] = dir;
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        temp_state[arg] = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points. */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM. */
                tcg_debug_assert(dir_temps[i] == 0
                                 || temp_state[i] != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded. */
                tcg_debug_assert(dir_temps[i] == 0
                                 || temp_state[i] == TS_DEAD);
            }
        }

        /* Outputs become available. */
        for (i = 0; i < nb_oargs; i++) {
            arg = args[i];
            if (arg >= nb_globals) {
                continue;
            }
            dir = dir_temps[arg];
            if (dir == 0) {
                continue;
            }
            args[i] = dir;
            changes = true;

            /* The output is now live and modified. */
            temp_state[arg] = 0;

            /* Sync outputs upon their last write. */
            if (NEED_SYNC_ARG(i)) {
                TCGTemp *its = &s->temps[arg];
                TCGOpcode sopc = (its->type == TCG_TYPE_I32
                                  ? INDEX_op_st_i32
                                  : INDEX_op_st_i64);
                TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
                TCGArg *sargs = &s->gen_opparam_buf[sop->args];

                sargs[0] = dir;
                sargs[1] = temp_idx(s, its->mem_base);
                sargs[2] = its->mem_offset;

                temp_state[arg] = TS_MEM;
            }
            /* Drop outputs that are dead. */
            if (IS_DEAD_ARG(i)) {
                temp_state[arg] = TS_DEAD;
            }
        }
    }

    return changes;
}
1814 #ifdef CONFIG_DEBUG_TCG
1815 static void dump_regs(TCGContext *s)
1817 TCGTemp *ts;
1818 int i;
1819 char buf[64];
1821 for(i = 0; i < s->nb_temps; i++) {
1822 ts = &s->temps[i];
1823 printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1824 switch(ts->val_type) {
1825 case TEMP_VAL_REG:
1826 printf("%s", tcg_target_reg_names[ts->reg]);
1827 break;
1828 case TEMP_VAL_MEM:
1829 printf("%d(%s)", (int)ts->mem_offset,
1830 tcg_target_reg_names[ts->mem_base->reg]);
1831 break;
1832 case TEMP_VAL_CONST:
1833 printf("$0x%" TCG_PRIlx, ts->val);
1834 break;
1835 case TEMP_VAL_DEAD:
1836 printf("D");
1837 break;
1838 default:
1839 printf("???");
1840 break;
1842 printf("\n");
1845 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1846 if (s->reg_to_temp[i] != NULL) {
1847 printf("%s: %s\n",
1848 tcg_target_reg_names[i],
1849 tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
/* Debug helper: verify the two-way invariant between reg_to_temp[] and
   each temp's (val_type, reg) pair; on any mismatch, dump the register
   state and abort.  NOTE: the `goto fail` from the first loop jumps
   into the body of the second loop's if — both paths share the same
   dump-and-abort tail.  */
static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    /* Every register claiming to hold a temp must be named by it.  */
    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    /* Every register-resident temp must be claimed by its register.  */
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif
/* Assign temp TEMP a slot in the TCG stack frame, aligning the frame
   offset to the host word size and aborting if the frame is full.  */
static void temp_allocate_frame(TCGContext *s, int temp)
{
    TCGTemp *ts;
    ts = &s->temps[temp];
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    /* Frame overflow is a code generator bug, not a guest error.  */
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}
/* Forward declaration: temp_sync() below may need to materialize a
   constant into a register before storing it to memory.  */
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
1908 /* Mark a temporary as free or dead. If 'free_or_dead' is negative,
1909 mark it free; otherwise mark it dead. */
1910 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
1912 if (ts->fixed_reg) {
1913 return;
1915 if (ts->val_type == TEMP_VAL_REG) {
1916 s->reg_to_temp[ts->reg] = NULL;
1918 ts->val_type = (free_or_dead < 0
1919 || ts->temp_local
1920 || temp_idx(s, ts) < s->nb_globals
1921 ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
/* Mark a temporary as dead.  Thin wrapper over temp_free_or_dead with
   a positive argument (dead, not merely free).  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   registers needs to be allocated to store a constant. If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free. */
static void temp_sync(TCGContext *s, TCGTemp *ts,
                      TCGRegSet allocated_regs, int free_or_dead)
{
    /* Fixed-register temps are never backed by memory.  */
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, temp_idx(s, ts));
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly. */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            /* Otherwise materialize the constant into a register...  */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            /* ...and store it to the temp's memory slot.  */
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
1977 /* free register 'reg' by spilling the corresponding temporary if necessary */
1978 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
1980 TCGTemp *ts = s->reg_to_temp[reg];
1981 if (ts != NULL) {
1982 temp_sync(s, ts, allocated_regs, -1);
1986 /* Allocate a register belonging to reg1 & ~reg2 */
1987 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
1988 TCGRegSet allocated_regs, bool rev)
1990 int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
1991 const int *order;
1992 TCGReg reg;
1993 TCGRegSet reg_ct;
1995 tcg_regset_andnot(reg_ct, desired_regs, allocated_regs);
1996 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
1998 /* first try free registers */
1999 for(i = 0; i < n; i++) {
2000 reg = order[i];
2001 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
2002 return reg;
2005 /* XXX: do better spill choice */
2006 for(i = 0; i < n; i++) {
2007 reg = order[i];
2008 if (tcg_regset_test_reg(reg_ct, reg)) {
2009 tcg_reg_free(s, reg, allocated_regs);
2010 return reg;
2014 tcg_abort();
/* Make sure the temporary is in a register. If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  Constants are materialized
   with movi (not memory-coherent); memory values are loaded (coherent).
   A dead temp here is a register allocator bug.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        /* Already resident; nothing to do.  */
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        tcg_out_movi(s, ts->type, reg, ts->val);
        /* The memory slot (if any) does not hold this constant yet.  */
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant.
   NOTE: currently a pure assertion — liveness guarantees the temp is
   already in memory (or fixed), so there is nothing left to store;
   'allocated_regs' is unused but kept for interface symmetry.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory. Keep an tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
}
2055 /* save globals to their canonical location and assume they can be
2056 modified be the following code. 'allocated_regs' is used in case a
2057 temporary registers needs to be allocated to store a constant. */
2058 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
2060 int i;
2062 for (i = 0; i < s->nb_globals; i++) {
2063 temp_save(s, &s->temps[i], allocated_regs);
2067 /* sync globals to their canonical location and assume they can be
2068 read by the following code. 'allocated_regs' is used in case a
2069 temporary registers needs to be allocated to store a constant. */
2070 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
2072 int i;
2074 for (i = 0; i < s->nb_globals; i++) {
2075 TCGTemp *ts = &s->temps[i];
2076 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
2077 || ts->fixed_reg
2078 || ts->mem_coherent);
2082 /* at the end of a basic block, we assume all temporaries are dead and
2083 all globals are stored at their canonical location. */
2084 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
2086 int i;
2088 for (i = s->nb_globals; i < s->nb_temps; i++) {
2089 TCGTemp *ts = &s->temps[i];
2090 if (ts->temp_local) {
2091 temp_save(s, ts, allocated_regs);
2092 } else {
2093 /* The liveness analysis already ensures that temps are dead.
2094 Keep an tcg_debug_assert for safety. */
2095 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
2099 save_globals(s, allocated_regs);
/* Register allocation for a movi op: args[0] = dest temp index,
   args[1] = constant value.  For non-fixed destinations the constant
   is only recorded (propagated lazily); no host code is emitted unless
   the value must be synced to memory.  */
static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
                               TCGLifeData arg_life)
{
    TCGTemp *ots;
    tcg_target_ulong val;

    ots = &s->temps[args[0]];
    val = args[1];

    if (ots->fixed_reg) {
        /* For fixed registers, we do not do any constant propagation. */
        tcg_out_movi(s, ots->type, ots->reg, val);
        return;
    }

    /* The movi is not explicitly generated here. */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
/* Register allocation for a mov op: args[0] = dest, args[1] = source.
   Handles constant propagation, store-only moves to dead outputs, and
   register-register moves that can be elided when the source dies.  */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
                              const TCGArg *args, TCGLifeData arg_life)
{
    TCGRegSet allocated_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    tcg_regset_set(allocated_regs, s->reserved_regs);
    ots = &s->temps[args[0]];
    ts = &s->temps[args[1]];

    /* Note that otype != itype for no-op truncation. */
    otype = ots->type;
    itype = ts->type;

    /* If the source value is not in a register, and we're going to be
       forced to have it in a register in order to perform the copy,
       then copy the SOURCE value into its own register first. That way
       we don't have to reload SOURCE the next time it is used. */
    if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
        || ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
    }

    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        /* The code above should have moved the temp to a register. */
        tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, args[0]);
        }
        /* Store straight to the destination's memory slot.  */
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant */
        if (ots->val_type == TEMP_VAL_REG) {
            s->reg_to_temp[ots->reg] = NULL;
        }
        ots->val_type = TEMP_VAL_CONST;
        ots->val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
    } else {
        /* The code in the first if block should have moved the
           temp to a register. */
        tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed: the dest simply takes over
               the source's register */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, ots->indirect_base);
            }
            tcg_out_mov(s, otype, ots->reg, ts->reg);
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0);
        }
    }
}
/* Register allocation for a generic op: satisfy input constraints
   (possibly via moves to fresh registers), free clobbered registers,
   satisfy output constraints, emit the host instruction, then place
   outputs and apply the per-arg life bits.  The statement order here
   is load-bearing — inputs must be pinned before clobbers are freed,
   and outputs assigned after.  */
static void tcg_reg_alloc_op(TCGContext *s,
                             const TCGOpDef *def, TCGOpcode opc,
                             const TCGArg *args, TCGLifeData arg_life)
{
    TCGRegSet allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    /* satisfy input constraints */
    tcg_regset_set(allocated_regs, s->reserved_regs);
    for(k = 0; k < nb_iargs; k++) {
        /* Process inputs in constraint-priority order.  */
        i = def->sorted_args[nb_oargs + k];
        arg = args[i];
        arg_ct = &def->args_ct[i];
        ts = &s->temps[arg];

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            goto iarg_end;
        }

        temp_load(s, ts, arg_ct->u.regs, allocated_regs);

        if (arg_ct->ct & TCG_CT_IALIAS) {
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != args[arg_ct->alias_index])
                    goto allocate_in_reg;
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }
                /* check if the current register has already been allocated
                   for another input aliased to an output */
                int k2, i2;
                for (k2 = 0 ; k2 < k ; k2++) {
                    i2 = def->sorted_args[nb_oargs + k2];
                    if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
                        (new_args[i2] == ts->reg)) {
                        goto allocate_in_reg;
                    }
                }
            }
        }
        reg = ts->reg;
        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
                                ts->indirect_base);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(allocated_regs, reg);
    iarg_end: ;
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, &s->temps[args[i]]);
        }
    }

    if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, allocated_regs);
        }

        /* satisfy the output constraints */
        tcg_regset_set(allocated_regs, s->reserved_regs);
        for(k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = args[i];
            arg_ct = &def->args_ct[i];
            ts = &s->temps[arg];
            if (arg_ct->ct & TCG_CT_ALIAS) {
                /* Output shares the register chosen for its input.  */
                reg = new_args[arg_ct->alias_index];
            } else {
                /* if fixed register, we try to use it */
                reg = ts->reg;
                if (ts->fixed_reg &&
                    tcg_regset_test_reg(arg_ct->u.regs, reg)) {
                    goto oarg_end;
                }
                reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
                                    ts->indirect_base);
            }
            tcg_regset_set_reg(allocated_regs, reg);
            /* if a fixed register is used, then a move will be done afterwards */
            if (!ts->fixed_reg) {
                if (ts->val_type == TEMP_VAL_REG) {
                    s->reg_to_temp[ts->reg] = NULL;
                }
                ts->val_type = TEMP_VAL_REG;
                ts->reg = reg;
                /* temp value is modified, so the value kept in memory is
                   potentially not the same */
                ts->mem_coherent = 0;
                s->reg_to_temp[reg] = ts;
            }
        oarg_end:
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    tcg_out_op(s, opc, new_args, const_args);

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = &s->temps[args[i]];
        reg = new_args[i];
        if (ts->fixed_reg && ts->reg != reg) {
            tcg_out_mov(s, ts->type, ts->reg, reg);
        }
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
/* Direction multiplier for laying out call-argument stack slots: on
   hosts whose stack grows upward, slot offsets advance downward.
   NOTE(review): no use of STACK_DIR is visible in this chunk — confirm
   callers elsewhere in the file before removing. */
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif
/* Allocate host registers and emit code for a helper-function call op.
 *
 * Argument layout in ARGS: args[0 .. nb_oargs) are the output temps,
 * args[nb_oargs .. nb_oargs+nb_iargs) are the input temps, followed by
 * the helper's address and its TCG_CALL_* flags word.
 *
 * Inputs beyond the host's register-argument capacity are stored to the
 * call stack area; the rest are loaded into the fixed host calling-
 * convention registers.  Call-clobbered registers are then freed,
 * globals saved/synced according to FLAGS, the call emitted, and the
 * helper's return registers bound to the output temps.
 */
static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
                               const TCGArg * const args, TCGLifeData arg_life)
{
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    /* Helper address and call flags trail the output/input temps.  */
    func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
    flags = args[nb_oargs + nb_iargs + 1];

    /* Number of inputs passed in registers (capped by the host ABI).  */
    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        /* Upward-growing stack: pre-decrement the slot offset.  */
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            /* Ensure the value lives in some register, then spill it to
               its call-stack slot.  */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    tcg_regset_set(allocated_regs, s->reserved_regs);
    for (i = 0; i < nb_regs; i++) {
        arg = args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            reg = tcg_target_call_iarg_regs[i];
            /* Evict whatever currently occupies the ABI argument reg.  */
            tcg_reg_free(s, reg, allocated_regs);

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_out_mov(s, ts->type, reg, ts->reg);
                }
            } else {
                /* Force the load directly into the required register.  */
                TCGRegSet arg_set;

                tcg_regset_clear(arg_set);
                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, &s->temps[args[i]]);
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for (i = 0; i < nb_oargs; i++) {
        arg = args[i];
        ts = &s->temps[arg];
        reg = tcg_target_call_oarg_regs[i];
        /* The return register was just clobber-freed above.  */
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);

        if (ts->fixed_reg) {
            if (ts->reg != reg) {
                tcg_out_mov(s, ts->type, ts->reg, reg);
            }
        } else {
            /* Rebind the temp to the ABI return register; its memory
               copy (if any) is now stale.  */
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            if (NEED_SYNC_ARG(i)) {
                temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
            } else if (IS_DEAD_ARG(i)) {
                temp_dead(s, ts);
            }
        }
    }
}
#ifdef CONFIG_PROFILER
/* Per-opcode execution counters, bumped in the tcg_gen_code main loop.  */
static int64_t tcg_table_op_count[NB_OPS];

/* Dump one "<opcode name> <count>" line per TCG opcode.  */
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    int i;

    for (i = 0; i < NB_OPS; i++) {
        cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
                    tcg_table_op_count[i]);
    }
}
#else
/* Profiler disabled at build time: report that instead of statistics.  */
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
/* Translate the pending TCG op stream into host code for TB.
 *
 * Pipeline: optional peephole optimization, liveness analysis (with a
 * second pass and re-run when indirect temps get lowered to direct
 * ones), then a single walk over the op list dispatching each opcode to
 * the appropriate register-allocation/emission routine.
 *
 * Returns the number of generated code bytes, or -1 if the code buffer
 * high-water mark was crossed (caller must restart with a fresh/larger
 * buffer) or TB finalization failed.
 */
int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
    int i, oi, oi_next, num_insns;

#ifdef CONFIG_PROFILER
    {
        int n;

        /* Number of ops in the buffer: index 0 is the list sentinel,
           whose 'prev' links to the last real op.  */
        n = s->gen_op_buf[0].prev + 1;
        s->op_count += n;
        if (n > s->op_count_max) {
            s->op_count_max = n;
        }

        n = s->nb_temps;
        s->temp_count += n;
        if (n > s->temp_count_max) {
            s->temp_count_max = n;
        }
    }
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log("OP:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
    }
#endif

#ifdef CONFIG_PROFILER
    s->opt_time -= profile_getclock();
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    s->opt_time += profile_getclock();
    s->la_time -= profile_getclock();
#endif

    {
        /* Scratch per-temp liveness state; tcg_malloc memory lives until
           the pool is reset, so no explicit free here.  */
        uint8_t *temp_state = tcg_malloc(s->nb_temps + s->nb_indirects);

        liveness_pass_1(s, temp_state);

        if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
            if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                         && qemu_log_in_addr_range(tb->pc))) {
                qemu_log("OP before indirect lowering:\n");
                tcg_dump_ops(s);
                qemu_log("\n");
            }
#endif
            /* Replace indirect temps with direct temps. */
            if (liveness_pass_2(s, temp_state)) {
                /* If changes were made, re-run liveness. */
                liveness_pass_1(s, temp_state);
            }
        }
    }

#ifdef CONFIG_PROFILER
    s->la_time += profile_getclock();
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
    }
#endif

    tcg_reg_alloc_start(s);

    /* Emit directly into the TB's slot of the code_gen buffer.  */
    s->code_buf = tb->tc_ptr;
    s->code_ptr = tb->tc_ptr;

    tcg_out_tb_init(s);

    /* -1 means "no guest insn started yet"; the first insn_start op
       increments it to 0.  */
    num_insns = -1;
    for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
        TCGOp * const op = &s->gen_op_buf[oi];
        TCGArg * const args = &s->gen_opparam_buf[op->args];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;

        /* Save the link now: allocators below may edit the op list.  */
        oi_next = op->next;
#ifdef CONFIG_PROFILER
        tcg_table_op_count[opc]++;
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
            tcg_reg_alloc_mov(s, def, args, arg_life);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
            tcg_reg_alloc_movi(s, args, arg_life);
            break;
        case INDEX_op_insn_start:
            /* Close out the previous guest insn's code span.  */
            if (num_insns >= 0) {
                s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                /* 64-bit guest value split across two 32-bit args.  */
                a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
#else
                a = args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, &s->temps[args[0]]);
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(args[0]), s->code_ptr);
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op->callo, op->calli, args, arg_life);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            if (def->flags & TCG_OPF_NOT_PRESENT) {
                tcg_abort();
            }
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, def, opc, args, arg_life);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
    if (!tcg_out_tb_finalize(s)) {
        return -1;
    }

    /* flush instruction cache */
    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

    return tcg_current_code_size(s);
}
#ifdef CONFIG_PROFILER
/* Print accumulated JIT profiling statistics (counts, ratios and cycle
   breakdowns) to F via CPU_FPRINTF.  Divisors are clamped to 1 so the
   averages are well-defined even before any TB was translated.  */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGContext *s = &tcg_ctx;
    int64_t tb_count = s->tb_count;
    int64_t tb_div_count = tb_count ? tb_count : 1;
    int64_t tot = s->interm_time + s->code_time;

    /* The 2.4 GHz figure is only a rough cycles-to-seconds scale.  */
    cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    cpu_fprintf(f, "avg host code/TB %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    cpu_fprintf(f, "avg search data/TB %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    cpu_fprintf(f, "cycles/op %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    cpu_fprintf(f, "cycles/search byte %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    /* Avoid division by zero in the percentage lines below.  */
    if (tot == 0) {
        tot = 1;
    }
    cpu_fprintf(f, " gen_interm time %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, " gen_code time %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, " avg cycles %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
#else
/* Profiler disabled at build time: report that instead of statistics.  */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
2750 #ifdef ELF_HOST_MACHINE
2751 /* In order to use this feature, the backend needs to do three things:
2753 (1) Define ELF_HOST_MACHINE to indicate both what value to
2754 put into the ELF image and to indicate support for the feature.
2756 (2) Define tcg_register_jit. This should create a buffer containing
2757 the contents of a .debug_frame section that describes the post-
2758 prologue unwind info for the tcg machine.
2760 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS. */
/* NOTE(review): these declarations form an ABI that GDB discovers by
   symbol name and fixed layout — do not rename, reorder fields, or add
   members; see GDB's "JIT Compilation Interface" documentation. */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

/* One node of GDB's doubly-linked list of in-memory symbol files.  */
struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

/* GDB plants a breakpoint here; the empty asm keeps the call from being
   optimized away or inlined.  */
void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
/* Return the byte offset of STR inside the NUL-separated string table
   STRTAB.  Offset 0 holds the empty string, so scanning starts at 1.
   The caller must pass a string that is present in the table; there is
   deliberately no not-found exit. */
static int find_string(const char *strtab, const char *str)
{
    const char *p;

    for (p = strtab + 1; strcmp(p, str) != 0; p += strlen(p) + 1) {
        continue;
    }
    return p - strtab;
}
/* Build an in-memory ELF image describing the code_gen buffer (a fake
 * .text plus minimal DWARF .debug_info/.debug_abbrev and the caller-
 * supplied .debug_frame) and hand it to GDB via the JIT interface, so
 * generated code gets symbolized and unwound in the debugger.
 *
 * BUF_PTR/BUF_SIZE describe the executable buffer; DEBUG_FRAME is a
 * ready-made .debug_frame section (DebugFrameHeader + FDE) of
 * DEBUG_FRAME_SIZE bytes, appended verbatim after the image.
 * Ownership: the g_malloc'd image is intentionally never freed — GDB
 * keeps referencing it for the lifetime of the process.
 */
static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    /* Minimal single-CU, single-function DWARF .debug_info section.  */
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            /* NOTE(review): sizeof(ElfW(Shdr)) here, where the header
               size would normally be sizeof(ElfW(Ehdr)); apparently
               harmless since consumers use e_phoff/e_shoff — confirm
               against upstream before changing.  */
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                /* The frame data is appended right after the image.  */
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    /* Patch in the run-time buffer address and sizes everywhere the
       template could not carry them statically.  */
    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    /* Append the caller's .debug_frame and point its FDE at the buffer.  */
    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            /* NOTE(review): fwrite returns the item count (<= 1 here),
               never img_size; the comparison exists only to consume the
               return value — the body is intentionally empty.  */
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite.  */
            }
            fclose(f);
        }
    }
#endif

    /* Register the image with GDB per the JIT interface protocol.  */
    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature. Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

/* No-op: GDB JIT registration requires ELF_HOST_MACHINE support.  */
static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

/* No-op public entry point kept so callers link unconditionally.  */
void tcg_register_jit(void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */