tcg: Move some opcode generation functions out of line
[qemu.git] / tcg / tcg.c
blob3470500a7af15f17252567878acf5e5ac60866d5
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
29 #include "config.h"
31 /* Define to jump the ELF file used to communicate with GDB. */
32 #undef DEBUG_JIT
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
36 #define NDEBUG
37 #endif
39 #include "qemu-common.h"
40 #include "qemu/host-utils.h"
41 #include "qemu/timer.h"
43 /* Note: the long term plan is to reduce the dependencies on the QEMU
44 CPU definitions. Currently they are used for qemu_ld/st
45 instructions */
46 #define NO_CPU_IO_DEFS
47 #include "cpu.h"
49 #include "tcg-op.h"
51 #if UINTPTR_MAX == UINT32_MAX
52 # define ELF_CLASS ELFCLASS32
53 #else
54 # define ELF_CLASS ELFCLASS64
55 #endif
56 #ifdef HOST_WORDS_BIGENDIAN
57 # define ELF_DATA ELFDATA2MSB
58 #else
59 # define ELF_DATA ELFDATA2LSB
60 #endif
62 #include "elf.h"
64 /* Forward declarations for functions declared in tcg-target.c and used here. */
65 static void tcg_target_init(TCGContext *s);
66 static void tcg_target_qemu_prologue(TCGContext *s);
67 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
68 intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts.  */
/* DWARF .debug_frame Common Information Entry header, used when
   registering generated code with GDB's JIT interface.  The len field
   is over-aligned to pointer size so the structure can be emitted
   directly into the in-memory ELF image.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

/* DWARF Frame Description Entry header: covers one contiguous range
   of generated code (func_start .. func_start + func_len).  */
typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

/* A CIE immediately followed by an FDE, as emitted into .debug_frame.  */
typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
93 static void tcg_register_jit_int(void *buf, size_t size,
94 const void *debug_frame,
95 size_t debug_frame_size)
96 __attribute__((unused));
98 /* Forward declarations for functions declared and used in tcg-target.c. */
99 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
100 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
101 intptr_t arg2);
102 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
103 static void tcg_out_movi(TCGContext *s, TCGType type,
104 TCGReg ret, tcg_target_long arg);
105 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
106 const int *const_args);
107 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
108 intptr_t arg2);
109 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
110 static int tcg_target_const_match(tcg_target_long val, TCGType type,
111 const TCGArgConstraint *arg_ct);
112 static void tcg_out_tb_init(TCGContext *s);
113 static void tcg_out_tb_finalize(TCGContext *s);
116 TCGOpDef tcg_op_defs[] = {
117 #define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
118 #include "tcg-opc.h"
119 #undef DEF
121 const size_t tcg_op_defs_max = ARRAY_SIZE(tcg_op_defs);
123 static TCGRegSet tcg_target_available_regs[2];
124 static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
/* Append one byte of code to the output stream.  */
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

/* Overwrite one byte at an already-emitted code location.  */
static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif
139 #if TCG_TARGET_INSN_UNIT_SIZE <= 2
140 static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
142 if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
143 *s->code_ptr++ = v;
144 } else {
145 tcg_insn_unit *p = s->code_ptr;
146 memcpy(p, &v, sizeof(v));
147 s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
151 static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
152 uint16_t v)
154 if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
155 *p = v;
156 } else {
157 memcpy(p, &v, sizeof(v));
160 #endif
162 #if TCG_TARGET_INSN_UNIT_SIZE <= 4
163 static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
165 if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
166 *s->code_ptr++ = v;
167 } else {
168 tcg_insn_unit *p = s->code_ptr;
169 memcpy(p, &v, sizeof(v));
170 s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
174 static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
175 uint32_t v)
177 if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
178 *p = v;
179 } else {
180 memcpy(p, &v, sizeof(v));
183 #endif
185 #if TCG_TARGET_INSN_UNIT_SIZE <= 8
186 static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
188 if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
189 *s->code_ptr++ = v;
190 } else {
191 tcg_insn_unit *p = s->code_ptr;
192 memcpy(p, &v, sizeof(v));
193 s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
197 static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
198 uint64_t v)
200 if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
201 *p = v;
202 } else {
203 memcpy(p, &v, sizeof(v));
206 #endif
208 /* label relocation processing */
210 static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
211 int label_index, intptr_t addend)
213 TCGLabel *l;
214 TCGRelocation *r;
216 l = &s->labels[label_index];
217 if (l->has_value) {
218 /* FIXME: This may break relocations on RISC targets that
219 modify instruction fields in place. The caller may not have
220 written the initial value. */
221 patch_reloc(code_ptr, type, l->u.value, addend);
222 } else {
223 /* add a new relocation entry */
224 r = tcg_malloc(sizeof(TCGRelocation));
225 r->type = type;
226 r->ptr = code_ptr;
227 r->addend = addend;
228 r->next = l->u.first_reloc;
229 l->u.first_reloc = r;
233 static void tcg_out_label(TCGContext *s, int label_index, tcg_insn_unit *ptr)
235 TCGLabel *l = &s->labels[label_index];
236 intptr_t value = (intptr_t)ptr;
237 TCGRelocation *r;
239 assert(!l->has_value);
241 for (r = l->u.first_reloc; r != NULL; r = r->next) {
242 patch_reloc(r->ptr, r->type, value, r->addend);
245 l->has_value = 1;
246 l->u.value_ptr = ptr;
249 int gen_new_label(void)
251 TCGContext *s = &tcg_ctx;
252 int idx;
253 TCGLabel *l;
255 if (s->nb_labels >= TCG_MAX_LABELS)
256 tcg_abort();
257 idx = s->nb_labels++;
258 l = &s->labels[idx];
259 l->has_value = 0;
260 l->u.first_reloc = NULL;
261 return idx;
264 #include "tcg-target.c"
266 /* pool based memory allocation */
267 void *tcg_malloc_internal(TCGContext *s, int size)
269 TCGPool *p;
270 int pool_size;
272 if (size > TCG_POOL_CHUNK_SIZE) {
273 /* big malloc: insert a new pool (XXX: could optimize) */
274 p = g_malloc(sizeof(TCGPool) + size);
275 p->size = size;
276 p->next = s->pool_first_large;
277 s->pool_first_large = p;
278 return p->data;
279 } else {
280 p = s->pool_current;
281 if (!p) {
282 p = s->pool_first;
283 if (!p)
284 goto new_pool;
285 } else {
286 if (!p->next) {
287 new_pool:
288 pool_size = TCG_POOL_CHUNK_SIZE;
289 p = g_malloc(sizeof(TCGPool) + pool_size);
290 p->size = pool_size;
291 p->next = NULL;
292 if (s->pool_current)
293 s->pool_current->next = p;
294 else
295 s->pool_first = p;
296 } else {
297 p = p->next;
301 s->pool_current = p;
302 s->pool_cur = p->data + size;
303 s->pool_end = p->data + p->size;
304 return p->data;
307 void tcg_pool_reset(TCGContext *s)
309 TCGPool *p, *t;
310 for (p = s->pool_first_large; p; p = t) {
311 t = p->next;
312 g_free(p);
314 s->pool_first_large = NULL;
315 s->pool_cur = s->pool_end = NULL;
316 s->pool_current = NULL;
319 typedef struct TCGHelperInfo {
320 void *func;
321 const char *name;
322 unsigned flags;
323 unsigned sizemask;
324 } TCGHelperInfo;
326 #include "exec/helper-proto.h"
328 static const TCGHelperInfo all_helpers[] = {
329 #include "exec/helper-tcg.h"
332 void tcg_context_init(TCGContext *s)
334 int op, total_args, n, i;
335 TCGOpDef *def;
336 TCGArgConstraint *args_ct;
337 int *sorted_args;
338 GHashTable *helper_table;
340 memset(s, 0, sizeof(*s));
341 s->nb_globals = 0;
343 /* Count total number of arguments and allocate the corresponding
344 space */
345 total_args = 0;
346 for(op = 0; op < NB_OPS; op++) {
347 def = &tcg_op_defs[op];
348 n = def->nb_iargs + def->nb_oargs;
349 total_args += n;
352 args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
353 sorted_args = g_malloc(sizeof(int) * total_args);
355 for(op = 0; op < NB_OPS; op++) {
356 def = &tcg_op_defs[op];
357 def->args_ct = args_ct;
358 def->sorted_args = sorted_args;
359 n = def->nb_iargs + def->nb_oargs;
360 sorted_args += n;
361 args_ct += n;
364 /* Register helpers. */
365 /* Use g_direct_hash/equal for direct pointer comparisons on func. */
366 s->helpers = helper_table = g_hash_table_new(NULL, NULL);
368 for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
369 g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
370 (gpointer)&all_helpers[i]);
373 tcg_target_init(s);
376 void tcg_prologue_init(TCGContext *s)
378 /* init global prologue and epilogue */
379 s->code_buf = s->code_gen_prologue;
380 s->code_ptr = s->code_buf;
381 tcg_target_qemu_prologue(s);
382 flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
384 #ifdef DEBUG_DISAS
385 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
386 size_t size = tcg_current_code_size(s);
387 qemu_log("PROLOGUE: [size=%zu]\n", size);
388 log_disas(s->code_buf, size);
389 qemu_log("\n");
390 qemu_log_flush();
392 #endif
395 void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size)
397 s->frame_start = start;
398 s->frame_end = start + size;
399 s->frame_reg = reg;
402 void tcg_func_start(TCGContext *s)
404 tcg_pool_reset(s);
405 s->nb_temps = s->nb_globals;
407 /* No temps have been previously allocated for size or locality. */
408 memset(s->free_temps, 0, sizeof(s->free_temps));
410 s->labels = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
411 s->nb_labels = 0;
412 s->current_frame_offset = s->frame_start;
414 #ifdef CONFIG_DEBUG_TCG
415 s->goto_tb_issue_mask = 0;
416 #endif
418 s->gen_opc_ptr = s->gen_opc_buf;
419 s->gen_opparam_ptr = s->gen_opparam_buf;
421 s->be = tcg_malloc(sizeof(TCGBackendData));
424 static inline void tcg_temp_alloc(TCGContext *s, int n)
426 if (n > TCG_MAX_TEMPS)
427 tcg_abort();
430 static inline int tcg_global_reg_new_internal(TCGType type, int reg,
431 const char *name)
433 TCGContext *s = &tcg_ctx;
434 TCGTemp *ts;
435 int idx;
437 #if TCG_TARGET_REG_BITS == 32
438 if (type != TCG_TYPE_I32)
439 tcg_abort();
440 #endif
441 if (tcg_regset_test_reg(s->reserved_regs, reg))
442 tcg_abort();
443 idx = s->nb_globals;
444 tcg_temp_alloc(s, s->nb_globals + 1);
445 ts = &s->temps[s->nb_globals];
446 ts->base_type = type;
447 ts->type = type;
448 ts->fixed_reg = 1;
449 ts->reg = reg;
450 ts->name = name;
451 s->nb_globals++;
452 tcg_regset_set_reg(s->reserved_regs, reg);
453 return idx;
456 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name)
458 int idx;
460 idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name);
461 return MAKE_TCGV_I32(idx);
464 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name)
466 int idx;
468 idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name);
469 return MAKE_TCGV_I64(idx);
472 static inline int tcg_global_mem_new_internal(TCGType type, int reg,
473 intptr_t offset,
474 const char *name)
476 TCGContext *s = &tcg_ctx;
477 TCGTemp *ts;
478 int idx;
480 idx = s->nb_globals;
481 #if TCG_TARGET_REG_BITS == 32
482 if (type == TCG_TYPE_I64) {
483 char buf[64];
484 tcg_temp_alloc(s, s->nb_globals + 2);
485 ts = &s->temps[s->nb_globals];
486 ts->base_type = type;
487 ts->type = TCG_TYPE_I32;
488 ts->fixed_reg = 0;
489 ts->mem_allocated = 1;
490 ts->mem_reg = reg;
491 #ifdef HOST_WORDS_BIGENDIAN
492 ts->mem_offset = offset + 4;
493 #else
494 ts->mem_offset = offset;
495 #endif
496 pstrcpy(buf, sizeof(buf), name);
497 pstrcat(buf, sizeof(buf), "_0");
498 ts->name = strdup(buf);
499 ts++;
501 ts->base_type = type;
502 ts->type = TCG_TYPE_I32;
503 ts->fixed_reg = 0;
504 ts->mem_allocated = 1;
505 ts->mem_reg = reg;
506 #ifdef HOST_WORDS_BIGENDIAN
507 ts->mem_offset = offset;
508 #else
509 ts->mem_offset = offset + 4;
510 #endif
511 pstrcpy(buf, sizeof(buf), name);
512 pstrcat(buf, sizeof(buf), "_1");
513 ts->name = strdup(buf);
515 s->nb_globals += 2;
516 } else
517 #endif
519 tcg_temp_alloc(s, s->nb_globals + 1);
520 ts = &s->temps[s->nb_globals];
521 ts->base_type = type;
522 ts->type = type;
523 ts->fixed_reg = 0;
524 ts->mem_allocated = 1;
525 ts->mem_reg = reg;
526 ts->mem_offset = offset;
527 ts->name = name;
528 s->nb_globals++;
530 return idx;
533 TCGv_i32 tcg_global_mem_new_i32(int reg, intptr_t offset, const char *name)
535 int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
536 return MAKE_TCGV_I32(idx);
539 TCGv_i64 tcg_global_mem_new_i64(int reg, intptr_t offset, const char *name)
541 int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
542 return MAKE_TCGV_I64(idx);
545 static inline int tcg_temp_new_internal(TCGType type, int temp_local)
547 TCGContext *s = &tcg_ctx;
548 TCGTemp *ts;
549 int idx, k;
551 k = type + (temp_local ? TCG_TYPE_COUNT : 0);
552 idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
553 if (idx < TCG_MAX_TEMPS) {
554 /* There is already an available temp with the right type. */
555 clear_bit(idx, s->free_temps[k].l);
557 ts = &s->temps[idx];
558 ts->temp_allocated = 1;
559 assert(ts->base_type == type);
560 assert(ts->temp_local == temp_local);
561 } else {
562 idx = s->nb_temps;
563 #if TCG_TARGET_REG_BITS == 32
564 if (type == TCG_TYPE_I64) {
565 tcg_temp_alloc(s, s->nb_temps + 2);
566 ts = &s->temps[s->nb_temps];
567 ts->base_type = type;
568 ts->type = TCG_TYPE_I32;
569 ts->temp_allocated = 1;
570 ts->temp_local = temp_local;
571 ts->name = NULL;
572 ts++;
573 ts->base_type = type;
574 ts->type = TCG_TYPE_I32;
575 ts->temp_allocated = 1;
576 ts->temp_local = temp_local;
577 ts->name = NULL;
578 s->nb_temps += 2;
579 } else
580 #endif
582 tcg_temp_alloc(s, s->nb_temps + 1);
583 ts = &s->temps[s->nb_temps];
584 ts->base_type = type;
585 ts->type = type;
586 ts->temp_allocated = 1;
587 ts->temp_local = temp_local;
588 ts->name = NULL;
589 s->nb_temps++;
593 #if defined(CONFIG_DEBUG_TCG)
594 s->temps_in_use++;
595 #endif
596 return idx;
599 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
601 int idx;
603 idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
604 return MAKE_TCGV_I32(idx);
607 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
609 int idx;
611 idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
612 return MAKE_TCGV_I64(idx);
615 static void tcg_temp_free_internal(int idx)
617 TCGContext *s = &tcg_ctx;
618 TCGTemp *ts;
619 int k;
621 #if defined(CONFIG_DEBUG_TCG)
622 s->temps_in_use--;
623 if (s->temps_in_use < 0) {
624 fprintf(stderr, "More temporaries freed than allocated!\n");
626 #endif
628 assert(idx >= s->nb_globals && idx < s->nb_temps);
629 ts = &s->temps[idx];
630 assert(ts->temp_allocated != 0);
631 ts->temp_allocated = 0;
633 k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
634 set_bit(idx, s->free_temps[k].l);
637 void tcg_temp_free_i32(TCGv_i32 arg)
639 tcg_temp_free_internal(GET_TCGV_I32(arg));
642 void tcg_temp_free_i64(TCGv_i64 arg)
644 tcg_temp_free_internal(GET_TCGV_I64(arg));
647 TCGv_i32 tcg_const_i32(int32_t val)
649 TCGv_i32 t0;
650 t0 = tcg_temp_new_i32();
651 tcg_gen_movi_i32(t0, val);
652 return t0;
655 TCGv_i64 tcg_const_i64(int64_t val)
657 TCGv_i64 t0;
658 t0 = tcg_temp_new_i64();
659 tcg_gen_movi_i64(t0, val);
660 return t0;
663 TCGv_i32 tcg_const_local_i32(int32_t val)
665 TCGv_i32 t0;
666 t0 = tcg_temp_local_new_i32();
667 tcg_gen_movi_i32(t0, val);
668 return t0;
671 TCGv_i64 tcg_const_local_i64(int64_t val)
673 TCGv_i64 t0;
674 t0 = tcg_temp_local_new_i64();
675 tcg_gen_movi_i64(t0, val);
676 return t0;
#if defined(CONFIG_DEBUG_TCG)
/* Debug aid: reset the live-temp counter at instruction boundaries.  */
void tcg_clear_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    s->temps_in_use = 0;
}

/* Debug aid: report (and reset) whether any temps leaked since the
   last tcg_clear_temp_count().  Returns 1 on leak, 0 otherwise.  */
int tcg_check_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.  */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
/* Emit an INDEX_op_call op invoking helper FUNC with NARGS arguments
   from ARGS, returning into RET (TCG_CALL_DUMMY_ARG for void helpers).
   Helper flags and the 64-bit-argument bitmap come from the helper
   table built in tcg_context_init().
   NOTE(review): brace-only lines were lost when this file was
   extracted; code tokens below are unchanged from the original.  */
void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
                   int nargs, TCGArg *args)
    int i, real_args, nb_rets;
    unsigned sizemask, flags;
    TCGArg *nparam;
    TCGHelperInfo *info;

    info = g_hash_table_lookup(s->helpers, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;

    TCGV_UNUSED_I64(retl);
    TCGV_UNUSED_I64(reth);
    if (sizemask != 0) {
        TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2);
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = GET_TCGV_I32(h);
                split_args[real_args++] = GET_TCGV_I32(l);
            } else {
                split_args[real_args++] = args[i];
        nargs = real_args;
        args = split_args;
        sizemask = 0;
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    /* Widen each 32-bit argument to 64 bits, sign- or zero-extending
       per the sizemask, for ABIs that require extended arguments.  */
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            args[i] = GET_TCGV_I64(temp);
#endif /* TCG_TARGET_EXTEND_ARGS */

    /* Emit the opcode; the param count word is back-patched below.  */
    *s->gen_opc_ptr++ = INDEX_op_call;
    nparam = s->gen_opparam_ptr++;
    if (ret != TCG_CALL_DUMMY_ARG) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            *s->gen_opparam_ptr++ = GET_TCGV_I64(reth);
            *s->gen_opparam_ptr++ = GET_TCGV_I64(retl);
            nb_rets = 2;
        } else {
            *s->gen_opparam_ptr++ = ret;
            nb_rets = 1;
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
            /* 64-bit return on a 32-bit host uses two consecutive
               temps, host-endian ordered.  */
#ifdef HOST_WORDS_BIGENDIAN
            *s->gen_opparam_ptr++ = ret + 1;
            *s->gen_opparam_ptr++ = ret;
#else
            *s->gen_opparam_ptr++ = ret;
            *s->gen_opparam_ptr++ = ret + 1;
#endif
            nb_rets = 2;
        } else {
            *s->gen_opparam_ptr++ = ret;
            nb_rets = 1;
#endif
    } else {
        nb_rets = 0;
    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                *s->gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG;
                real_args++;
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            *s->gen_opparam_ptr++ = args[i] + 1;
            *s->gen_opparam_ptr++ = args[i];
#else
            *s->gen_opparam_ptr++ = args[i];
            *s->gen_opparam_ptr++ = args[i] + 1;
#endif
            real_args += 2;
            continue;
        *s->gen_opparam_ptr++ = args[i];
        real_args++;
    *s->gen_opparam_ptr++ = (uintptr_t)func;
    *s->gen_opparam_ptr++ = flags;

    /* Back-patch the packed return/arg counts.  */
    *nparam = (nb_rets << 16) | real_args;

    /* total parameters, needed to go backward in the instruction stream */
    *s->gen_opparam_ptr++ = 1 + nb_rets + real_args + 3;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
            TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
            tcg_temp_free_i32(h);
            tcg_temp_free_i32(l);
        } else {
            real_args++;
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create. */
        tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
            tcg_temp_free_i64(temp);
#endif /* TCG_TARGET_EXTEND_ARGS */
873 static void tcg_reg_alloc_start(TCGContext *s)
875 int i;
876 TCGTemp *ts;
877 for(i = 0; i < s->nb_globals; i++) {
878 ts = &s->temps[i];
879 if (ts->fixed_reg) {
880 ts->val_type = TEMP_VAL_REG;
881 } else {
882 ts->val_type = TEMP_VAL_MEM;
885 for(i = s->nb_globals; i < s->nb_temps; i++) {
886 ts = &s->temps[i];
887 if (ts->temp_local) {
888 ts->val_type = TEMP_VAL_MEM;
889 } else {
890 ts->val_type = TEMP_VAL_DEAD;
892 ts->mem_allocated = 0;
893 ts->fixed_reg = 0;
895 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
896 s->reg_to_temp[i] = -1;
900 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
901 int idx)
903 TCGTemp *ts;
905 assert(idx >= 0 && idx < s->nb_temps);
906 ts = &s->temps[idx];
907 if (idx < s->nb_globals) {
908 pstrcpy(buf, buf_size, ts->name);
909 } else {
910 if (ts->temp_local)
911 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
912 else
913 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
915 return buf;
918 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg)
920 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg));
923 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
925 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
928 /* Find helper name. */
929 static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
931 const char *ret = NULL;
932 if (s->helpers) {
933 TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val);
934 if (info) {
935 ret = info->name;
938 return ret;
941 static const char * const cond_name[] =
943 [TCG_COND_NEVER] = "never",
944 [TCG_COND_ALWAYS] = "always",
945 [TCG_COND_EQ] = "eq",
946 [TCG_COND_NE] = "ne",
947 [TCG_COND_LT] = "lt",
948 [TCG_COND_GE] = "ge",
949 [TCG_COND_LE] = "le",
950 [TCG_COND_GT] = "gt",
951 [TCG_COND_LTU] = "ltu",
952 [TCG_COND_GEU] = "geu",
953 [TCG_COND_LEU] = "leu",
954 [TCG_COND_GTU] = "gtu"
957 static const char * const ldst_name[] =
959 [MO_UB] = "ub",
960 [MO_SB] = "sb",
961 [MO_LEUW] = "leuw",
962 [MO_LESW] = "lesw",
963 [MO_LEUL] = "leul",
964 [MO_LESL] = "lesl",
965 [MO_LEQ] = "leq",
966 [MO_BEUW] = "beuw",
967 [MO_BESW] = "besw",
968 [MO_BEUL] = "beul",
969 [MO_BESL] = "besl",
970 [MO_BEQ] = "beq",
/* Dump the pending intermediate ops (gen_opc_buf / gen_opparam_buf) to
   the QEMU log in readable form.  The variable-length "call" and
   "nopn" encodings, condition codes, and memory-op flags are decoded
   specially.
   NOTE(review): brace-only lines were lost when this file was
   extracted; code tokens below are unchanged from the original.  */
void tcg_dump_ops(TCGContext *s)
    const uint16_t *opc_ptr;
    const TCGArg *args;
    TCGArg arg;
    TCGOpcode c;
    int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn;
    const TCGOpDef *def;
    char buf[128];

    first_insn = 1;
    opc_ptr = s->gen_opc_buf;
    args = s->gen_opparam_buf;
    while (opc_ptr < s->gen_opc_ptr) {
        c = *opc_ptr++;
        def = &tcg_op_defs[c];
        if (c == INDEX_op_debug_insn_start) {
            uint64_t pc;
            /* The guest PC is split across two args when the guest
               word is wider than the host register.  */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
            pc = ((uint64_t)args[1] << 32) | args[0];
#else
            pc = args[0];
#endif
            if (!first_insn) {
                qemu_log("\n");
            qemu_log(" ---- 0x%" PRIx64, pc);
            first_insn = 0;
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;
        } else if (c == INDEX_op_call) {
            TCGArg arg;

            /* variable number of arguments */
            arg = *args++;
            nb_oargs = arg >> 16;
            nb_iargs = arg & 0xffff;
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                     tcg_find_helper(s, args[nb_oargs + nb_iargs]),
                     args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                    args[i]));
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
                qemu_log(",%s", t);
        } else {
            qemu_log(" %s ", def->name);
            if (c == INDEX_op_nopn) {
                /* variable number of arguments */
                nb_cargs = *args;
                nb_oargs = 0;
                nb_iargs = 0;
            } else {
                nb_oargs = def->nb_oargs;
                nb_iargs = def->nb_iargs;
                nb_cargs = def->nb_cargs;
            k = 0;
            for(i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    qemu_log(",");
                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                   args[k++]));
            for(i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    qemu_log(",");
                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                   args[k++]));
            /* Decode the first const arg symbolically for ops whose
               first const is a condition or a memop.  */
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
                if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
                    qemu_log(",%s", cond_name[args[k++]]);
                } else {
                    qemu_log(",$0x%" TCG_PRIlx, args[k++]);
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                if (args[k] < ARRAY_SIZE(ldst_name) && ldst_name[args[k]]) {
                    qemu_log(",%s", ldst_name[args[k++]]);
                } else {
                    qemu_log(",$0x%" TCG_PRIlx, args[k++]);
                i = 1;
                break;
            default:
                i = 0;
                break;
            /* Remaining const args print as raw hex.  */
            for(; i < nb_cargs; i++) {
                if (k != 0) {
                    qemu_log(",");
                arg = args[k++];
                qemu_log("$0x%" TCG_PRIlx, arg);
        qemu_log("\n");
        args += nb_iargs + nb_oargs + nb_cargs;
1101 /* we give more priority to constraints with less registers */
1102 static int get_constraint_priority(const TCGOpDef *def, int k)
1104 const TCGArgConstraint *arg_ct;
1106 int i, n;
1107 arg_ct = &def->args_ct[k];
1108 if (arg_ct->ct & TCG_CT_ALIAS) {
1109 /* an alias is equivalent to a single register */
1110 n = 1;
1111 } else {
1112 if (!(arg_ct->ct & TCG_CT_REG))
1113 return 0;
1114 n = 0;
1115 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1116 if (tcg_regset_test_reg(arg_ct->u.regs, i))
1117 n++;
1120 return TCG_TARGET_NB_REGS - n + 1;
1123 /* sort from highest priority to lowest */
1124 static void sort_constraints(TCGOpDef *def, int start, int n)
1126 int i, j, p1, p2, tmp;
1128 for(i = 0; i < n; i++)
1129 def->sorted_args[start + i] = start + i;
1130 if (n <= 1)
1131 return;
1132 for(i = 0; i < n - 1; i++) {
1133 for(j = i + 1; j < n; j++) {
1134 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1135 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1136 if (p1 < p2) {
1137 tmp = def->sorted_args[start + i];
1138 def->sorted_args[start + i] = def->sorted_args[start + j];
1139 def->sorted_args[start + j] = tmp;
/* Install the backend's operand-constraint strings (TDEFS, terminated
   by an entry with op == -1) into tcg_op_defs, resolving digit
   constraints into input/output aliases, then pre-sort each op's
   arguments by constraint priority.  With CONFIG_DEBUG_TCG, also
   cross-check that the backend defined exactly the expected ops.
   NOTE(review): brace-only lines were lost when this file was
   extracted; code tokens below are unchanged from the original.  */
void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
    TCGOpcode op;
    TCGOpDef *def;
    const char *ct_str;
    int i, nb_args;

    for(;;) {
        if (tdefs->op == (TCGOpcode)-1)
            break;
        op = tdefs->op;
        assert((unsigned)op < NB_OPS);
        def = &tcg_op_defs[op];
#if defined(CONFIG_DEBUG_TCG)
        /* Duplicate entry in op definitions? */
        assert(!def->used);
        def->used = 1;
#endif
        nb_args = def->nb_iargs + def->nb_oargs;
        for(i = 0; i < nb_args; i++) {
            ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry? */
            assert(ct_str != NULL);
            tcg_regset_clear(def->args_ct[i].u.regs);
            def->args_ct[i].ct = 0;
            if (ct_str[0] >= '0' && ct_str[0] <= '9') {
                /* Digit constraint: input i must share the register of
                   output 'oarg'.  */
                int oarg;
                oarg = ct_str[0] - '0';
                assert(oarg < def->nb_oargs);
                assert(def->args_ct[oarg].ct & TCG_CT_REG);
                /* TCG_CT_ALIAS is for the output arguments. The input
                   argument is tagged with TCG_CT_IALIAS. */
                def->args_ct[i] = def->args_ct[oarg];
                def->args_ct[oarg].ct = TCG_CT_ALIAS;
                def->args_ct[oarg].alias_index = i;
                def->args_ct[i].ct |= TCG_CT_IALIAS;
                def->args_ct[i].alias_index = oarg;
            } else {
                /* Parse each constraint letter; 'i' is generic, the
                   rest are backend-specific.  */
                for(;;) {
                    if (*ct_str == '\0')
                        break;
                    switch(*ct_str) {
                    case 'i':
                        def->args_ct[i].ct |= TCG_CT_CONST;
                        ct_str++;
                        break;
                    default:
                        if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
                            fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
                                    ct_str, i, def->name);
                            exit(1);

        /* TCGTargetOpDef entry with too much information? */
        assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just an heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);

#if 0
        int i;
        printf("%s: sorted=", def->name);
        for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
            printf(" %d", def->sorted_args[i]);
        printf("\n");
#endif
        tdefs++;

#if defined(CONFIG_DEBUG_TCG)
    i = 0;
    for (op = 0; op < ARRAY_SIZE(tcg_op_defs); op++) {
        const TCGOpDef *def = &tcg_op_defs[op];
        if (def->flags & TCG_OPF_NOT_PRESENT) {
            /* Wrong entry in op definitions? */
            if (def->used) {
                fprintf(stderr, "Invalid op definition for %s\n", def->name);
                i = 1;
        } else {
            /* Missing entry in op definitions? */
            if (!def->used) {
                fprintf(stderr, "Missing op definition for %s\n", def->name);
                i = 1;
    if (i == 1) {
        tcg_abort();
#endif
1246 #ifdef USE_LIVENESS_ANALYSIS
1248 /* set a nop for an operation using 'nb_args' */
1249 static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr,
1250 TCGArg *args, int nb_args)
1252 if (nb_args == 0) {
1253 *opc_ptr = INDEX_op_nop;
1254 } else {
1255 *opc_ptr = INDEX_op_nopn;
1256 args[0] = nb_args;
1257 args[nb_args - 1] = nb_args;
1261 /* liveness analysis: end of function: all temps are dead, and globals
1262 should be in memory. */
1263 static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps,
1264 uint8_t *mem_temps)
1266 memset(dead_temps, 1, s->nb_temps);
1267 memset(mem_temps, 1, s->nb_globals);
1268 memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals);
1271 /* liveness analysis: end of basic block: all temps are dead, globals
1272 and local temps should be in memory. */
1273 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
1274 uint8_t *mem_temps)
1276 int i;
1278 memset(dead_temps, 1, s->nb_temps);
1279 memset(mem_temps, 1, s->nb_globals);
1280 for(i = s->nb_globals; i < s->nb_temps; i++) {
1281 mem_temps[i] = s->temps[i].temp_local;
1285 /* Liveness analysis : update the opc_dead_args array to tell if a
1286 given input arguments is dead. Instructions updating dead
1287 temporaries are removed. */
1288 static void tcg_liveness_analysis(TCGContext *s)
1290 int i, op_index, nb_args, nb_iargs, nb_oargs, nb_ops;
1291 TCGOpcode op, op_new, op_new2;
1292 TCGArg *args, arg;
1293 const TCGOpDef *def;
1294 uint8_t *dead_temps, *mem_temps;
1295 uint16_t dead_args;
1296 uint8_t sync_args;
1297 bool have_op_new2;
1299 s->gen_opc_ptr++; /* skip end */
1301 nb_ops = s->gen_opc_ptr - s->gen_opc_buf;
/* One dead-args bitmask (uint16_t) and one sync-args bitmask (uint8_t)
   is recorded per op. */
1303 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1304 s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1306 dead_temps = tcg_malloc(s->nb_temps);
1307 mem_temps = tcg_malloc(s->nb_temps);
/* Seed the per-temp state with the end-of-function conditions, then
   scan the opcode stream backwards; 'args' walks the parameter buffer
   backwards in lock-step. */
1308 tcg_la_func_end(s, dead_temps, mem_temps);
1310 args = s->gen_opparam_ptr;
1311 op_index = nb_ops - 1;
1312 while (op_index >= 0) {
1313 op = s->gen_opc_buf[op_index];
1314 def = &tcg_op_defs[op];
1315 switch(op) {
1316 case INDEX_op_call:
1318 int call_flags;
1320 nb_args = args[-1];
1321 args -= nb_args;
1322 arg = *args++;
1323 nb_iargs = arg & 0xffff;
1324 nb_oargs = arg >> 16;
1325 call_flags = args[nb_oargs + nb_iargs + 1];
1327 /* pure functions can be removed if their result is not
1328 used */
1329 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
1330 for (i = 0; i < nb_oargs; i++) {
1331 arg = args[i];
1332 if (!dead_temps[arg] || mem_temps[arg]) {
1333 goto do_not_remove_call;
1336 tcg_set_nop(s, s->gen_opc_buf + op_index,
1337 args - 1, nb_args);
1338 } else {
1339 do_not_remove_call:
1341 /* output args are dead */
1342 dead_args = 0;
1343 sync_args = 0;
1344 for (i = 0; i < nb_oargs; i++) {
1345 arg = args[i];
1346 if (dead_temps[arg]) {
1347 dead_args |= (1 << i);
1349 if (mem_temps[arg]) {
1350 sync_args |= (1 << i);
1352 dead_temps[arg] = 1;
1353 mem_temps[arg] = 0;
1356 if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
1357 /* globals should be synced to memory */
1358 memset(mem_temps, 1, s->nb_globals);
1360 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
1361 TCG_CALL_NO_READ_GLOBALS))) {
1362 /* globals should go back to memory */
1363 memset(dead_temps, 1, s->nb_globals);
1366 /* input args are live */
1367 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1368 arg = args[i];
1369 if (arg != TCG_CALL_DUMMY_ARG) {
1370 if (dead_temps[arg]) {
1371 dead_args |= (1 << i);
1373 dead_temps[arg] = 0;
1376 s->op_dead_args[op_index] = dead_args;
1377 s->op_sync_args[op_index] = sync_args;
1379 args--;
1381 break;
1382 case INDEX_op_debug_insn_start:
1383 args -= def->nb_args;
1384 break;
1385 case INDEX_op_nopn:
1386 nb_args = args[-1];
1387 args -= nb_args;
1388 break;
1389 case INDEX_op_discard:
1390 args--;
1391 /* mark the temporary as dead */
1392 dead_temps[args[0]] = 1;
1393 mem_temps[args[0]] = 0;
1394 break;
1395 case INDEX_op_end:
1396 break;
/* Double-word add/sub: if only the low half of the result is live,
   the op can be degraded to a single-word add/sub plus a nop. */
1398 case INDEX_op_add2_i32:
1399 op_new = INDEX_op_add_i32;
1400 goto do_addsub2;
1401 case INDEX_op_sub2_i32:
1402 op_new = INDEX_op_sub_i32;
1403 goto do_addsub2;
1404 case INDEX_op_add2_i64:
1405 op_new = INDEX_op_add_i64;
1406 goto do_addsub2;
1407 case INDEX_op_sub2_i64:
1408 op_new = INDEX_op_sub_i64;
1409 do_addsub2:
1410 args -= 6;
1411 nb_iargs = 4;
1412 nb_oargs = 2;
1413 /* Test if the high part of the operation is dead, but not
1414 the low part. The result can be optimized to a simple
1415 add or sub. This happens often for x86_64 guest when the
1416 cpu mode is set to 32 bit. */
1417 if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1418 if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1419 goto do_remove;
1421 /* Create the single operation plus nop. */
1422 s->gen_opc_buf[op_index] = op = op_new;
1423 args[1] = args[2];
1424 args[2] = args[4];
1425 assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
1426 tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 3);
1427 /* Fall through and mark the single-word operation live. */
1428 nb_iargs = 2;
1429 nb_oargs = 1;
1431 goto do_not_remove;
/* Double-output multiplies: if one half of the result is dead, fall
   back to a single-output mul (low half) or mul*h (high half, when
   the target provides it) plus a nop. */
1433 case INDEX_op_mulu2_i32:
1434 op_new = INDEX_op_mul_i32;
1435 op_new2 = INDEX_op_muluh_i32;
1436 have_op_new2 = TCG_TARGET_HAS_muluh_i32;
1437 goto do_mul2;
1438 case INDEX_op_muls2_i32:
1439 op_new = INDEX_op_mul_i32;
1440 op_new2 = INDEX_op_mulsh_i32;
1441 have_op_new2 = TCG_TARGET_HAS_mulsh_i32;
1442 goto do_mul2;
1443 case INDEX_op_mulu2_i64:
1444 op_new = INDEX_op_mul_i64;
1445 op_new2 = INDEX_op_muluh_i64;
1446 have_op_new2 = TCG_TARGET_HAS_muluh_i64;
1447 goto do_mul2;
1448 case INDEX_op_muls2_i64:
1449 op_new = INDEX_op_mul_i64;
1450 op_new2 = INDEX_op_mulsh_i64;
1451 have_op_new2 = TCG_TARGET_HAS_mulsh_i64;
1452 goto do_mul2;
1453 do_mul2:
1454 args -= 4;
1455 nb_iargs = 2;
1456 nb_oargs = 2;
1457 if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1458 if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1459 /* Both parts of the operation are dead. */
1460 goto do_remove;
1462 /* The high part of the operation is dead; generate the low. */
1463 s->gen_opc_buf[op_index] = op = op_new;
1464 args[1] = args[2];
1465 args[2] = args[3];
1466 } else if (have_op_new2 && dead_temps[args[0]]
1467 && !mem_temps[args[0]]) {
1468 /* The low part of the operation is dead; generate the high. */
1469 s->gen_opc_buf[op_index] = op = op_new2;
1470 args[0] = args[1];
1471 args[1] = args[2];
1472 args[2] = args[3];
1473 } else {
1474 goto do_not_remove;
1476 assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
1477 tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 1);
1478 /* Mark the single-word operation live. */
1479 nb_oargs = 1;
1480 goto do_not_remove;
1482 default:
1483 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1484 args -= def->nb_args;
1485 nb_iargs = def->nb_iargs;
1486 nb_oargs = def->nb_oargs;
1488 /* Test if the operation can be removed because all
1489 its outputs are dead. We assume that nb_oargs == 0
1490 implies side effects */
1491 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1492 for(i = 0; i < nb_oargs; i++) {
1493 arg = args[i];
1494 if (!dead_temps[arg] || mem_temps[arg]) {
1495 goto do_not_remove;
1498 do_remove:
1499 tcg_set_nop(s, s->gen_opc_buf + op_index, args, def->nb_args);
1500 #ifdef CONFIG_PROFILER
1501 s->del_op_count++;
1502 #endif
1503 } else {
1504 do_not_remove:
1506 /* output args are dead */
1507 dead_args = 0;
1508 sync_args = 0;
1509 for(i = 0; i < nb_oargs; i++) {
1510 arg = args[i];
1511 if (dead_temps[arg]) {
1512 dead_args |= (1 << i);
1514 if (mem_temps[arg]) {
1515 sync_args |= (1 << i);
1517 dead_temps[arg] = 1;
1518 mem_temps[arg] = 0;
1521 /* if end of basic block, update */
1522 if (def->flags & TCG_OPF_BB_END) {
1523 tcg_la_bb_end(s, dead_temps, mem_temps);
1524 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1525 /* globals should be synced to memory */
1526 memset(mem_temps, 1, s->nb_globals);
1529 /* input args are live */
1530 for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1531 arg = args[i];
1532 if (dead_temps[arg]) {
1533 dead_args |= (1 << i);
1535 dead_temps[arg] = 0;
1537 s->op_dead_args[op_index] = dead_args;
1538 s->op_sync_args[op_index] = sync_args;
1540 break;
1542 op_index--;
/* After the backward walk, 'args' must have returned exactly to the
   start of the parameter buffer. */
1545 if (args != s->gen_opparam_buf) {
1546 tcg_abort();
1549 #else
1550 /* dummy liveness analysis */
1551 static void tcg_liveness_analysis(TCGContext *s)
1553 int nb_ops;
1554 nb_ops = s->gen_opc_ptr - s->gen_opc_buf;
1556 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1557 memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
1558 s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1559 memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t));
1561 #endif
1563 #ifndef NDEBUG
1564 static void dump_regs(TCGContext *s)
1566 TCGTemp *ts;
1567 int i;
1568 char buf[64];
1570 for(i = 0; i < s->nb_temps; i++) {
1571 ts = &s->temps[i];
1572 printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1573 switch(ts->val_type) {
1574 case TEMP_VAL_REG:
1575 printf("%s", tcg_target_reg_names[ts->reg]);
1576 break;
1577 case TEMP_VAL_MEM:
1578 printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
1579 break;
1580 case TEMP_VAL_CONST:
1581 printf("$0x%" TCG_PRIlx, ts->val);
1582 break;
1583 case TEMP_VAL_DEAD:
1584 printf("D");
1585 break;
1586 default:
1587 printf("???");
1588 break;
1590 printf("\n");
1593 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1594 if (s->reg_to_temp[i] >= 0) {
1595 printf("%s: %s\n",
1596 tcg_target_reg_names[i],
1597 tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
/* Debug-build consistency check: verify that the reg->temp map and the
   per-temp val_type/reg fields agree in both directions, aborting with
   a full register dump on any mismatch. */
1602 static void check_regs(TCGContext *s)
1604 int reg, k;
1605 TCGTemp *ts;
1606 char buf[64];
/* Direction 1: every register claimed by reg_to_temp must point at a
   temp that is actually in that register. */
1608 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1609 k = s->reg_to_temp[reg];
1610 if (k >= 0) {
1611 ts = &s->temps[k];
1612 if (ts->val_type != TEMP_VAL_REG ||
1613 ts->reg != reg) {
1614 printf("Inconsistency for register %s:\n",
1615 tcg_target_reg_names[reg]);
1616 goto fail;
/* Direction 2: every non-fixed temp recorded as being in a register
   must be the one reg_to_temp maps that register to. */
1620 for(k = 0; k < s->nb_temps; k++) {
1621 ts = &s->temps[k];
1622 if (ts->val_type == TEMP_VAL_REG &&
1623 !ts->fixed_reg &&
1624 s->reg_to_temp[ts->reg] != k) {
1625 printf("Inconsistency for temp %s:\n",
1626 tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
/* NOTE: 'fail' is also entered from the register loop above via goto. */
1627 fail:
1628 printf("reg state:\n");
1629 dump_regs(s);
1630 tcg_abort();
1634 #endif
1636 static void temp_allocate_frame(TCGContext *s, int temp)
1638 TCGTemp *ts;
1639 ts = &s->temps[temp];
1640 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
1641 /* Sparc64 stack is accessed with offset of 2047 */
1642 s->current_frame_offset = (s->current_frame_offset +
1643 (tcg_target_long)sizeof(tcg_target_long) - 1) &
1644 ~(sizeof(tcg_target_long) - 1);
1645 #endif
1646 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
1647 s->frame_end) {
1648 tcg_abort();
1650 ts->mem_offset = s->current_frame_offset;
1651 ts->mem_reg = s->frame_reg;
1652 ts->mem_allocated = 1;
1653 s->current_frame_offset += sizeof(tcg_target_long);
1656 /* sync register 'reg' by saving it to the corresponding temporary */
1657 static inline void tcg_reg_sync(TCGContext *s, int reg)
1659 TCGTemp *ts;
1660 int temp;
1662 temp = s->reg_to_temp[reg];
1663 ts = &s->temps[temp];
1664 assert(ts->val_type == TEMP_VAL_REG);
1665 if (!ts->mem_coherent && !ts->fixed_reg) {
1666 if (!ts->mem_allocated) {
1667 temp_allocate_frame(s, temp);
1669 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1671 ts->mem_coherent = 1;
1674 /* free register 'reg' by spilling the corresponding temporary if necessary */
1675 static void tcg_reg_free(TCGContext *s, int reg)
1677 int temp;
1679 temp = s->reg_to_temp[reg];
1680 if (temp != -1) {
1681 tcg_reg_sync(s, reg);
1682 s->temps[temp].val_type = TEMP_VAL_MEM;
1683 s->reg_to_temp[reg] = -1;
1687 /* Allocate a register belonging to reg1 & ~reg2 */
1688 static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
1690 int i, reg;
1691 TCGRegSet reg_ct;
1693 tcg_regset_andnot(reg_ct, reg1, reg2);
1695 /* first try free registers */
1696 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1697 reg = tcg_target_reg_alloc_order[i];
1698 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
1699 return reg;
1702 /* XXX: do better spill choice */
1703 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1704 reg = tcg_target_reg_alloc_order[i];
1705 if (tcg_regset_test_reg(reg_ct, reg)) {
1706 tcg_reg_free(s, reg);
1707 return reg;
1711 tcg_abort();
1714 /* mark a temporary as dead. */
1715 static inline void temp_dead(TCGContext *s, int temp)
1717 TCGTemp *ts;
1719 ts = &s->temps[temp];
1720 if (!ts->fixed_reg) {
1721 if (ts->val_type == TEMP_VAL_REG) {
1722 s->reg_to_temp[ts->reg] = -1;
1724 if (temp < s->nb_globals || ts->temp_local) {
1725 ts->val_type = TEMP_VAL_MEM;
1726 } else {
1727 ts->val_type = TEMP_VAL_DEAD;
1732 /* sync a temporary to memory. 'allocated_regs' is used in case a
1733 temporary registers needs to be allocated to store a constant. */
1734 static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs)
1736 TCGTemp *ts;
1738 ts = &s->temps[temp];
1739 if (!ts->fixed_reg) {
1740 switch(ts->val_type) {
1741 case TEMP_VAL_CONST:
1742 ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1743 allocated_regs);
1744 ts->val_type = TEMP_VAL_REG;
1745 s->reg_to_temp[ts->reg] = temp;
1746 ts->mem_coherent = 0;
1747 tcg_out_movi(s, ts->type, ts->reg, ts->val);
1748 /* fallthrough*/
1749 case TEMP_VAL_REG:
1750 tcg_reg_sync(s, ts->reg);
1751 break;
1752 case TEMP_VAL_DEAD:
1753 case TEMP_VAL_MEM:
1754 break;
1755 default:
1756 tcg_abort();
1761 /* save a temporary to memory. 'allocated_regs' is used in case a
1762 temporary registers needs to be allocated to store a constant. */
1763 static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
1765 #ifdef USE_LIVENESS_ANALYSIS
1766 /* The liveness analysis already ensures that globals are back
1767 in memory. Keep an assert for safety. */
1768 assert(s->temps[temp].val_type == TEMP_VAL_MEM || s->temps[temp].fixed_reg);
1769 #else
/* Without liveness analysis the temp must be explicitly flushed to its
   memory slot and then invalidated. */
1770 temp_sync(s, temp, allocated_regs);
1771 temp_dead(s, temp);
1772 #endif
1775 /* save globals to their canonical location and assume they can be
1776 modified be the following code. 'allocated_regs' is used in case a
1777 temporary registers needs to be allocated to store a constant. */
1778 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
1780 int i;
1782 for(i = 0; i < s->nb_globals; i++) {
1783 temp_save(s, i, allocated_regs);
1787 /* sync globals to their canonical location and assume they can be
1788 read by the following code. 'allocated_regs' is used in case a
1789 temporary registers needs to be allocated to store a constant. */
1790 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
1792 int i;
1794 for (i = 0; i < s->nb_globals; i++) {
1795 #ifdef USE_LIVENESS_ANALYSIS
/* With liveness analysis, globals are already coherent here; only
   assert that invariant. */
1796 assert(s->temps[i].val_type != TEMP_VAL_REG || s->temps[i].fixed_reg ||
1797 s->temps[i].mem_coherent);
1798 #else
1799 temp_sync(s, i, allocated_regs);
1800 #endif
1804 /* at the end of a basic block, we assume all temporaries are dead and
1805 all globals are stored at their canonical location. */
1806 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
1808 TCGTemp *ts;
1809 int i;
/* Local temps survive across basic blocks and must be spilled; plain
   temps are expected (or forced) to be dead. */
1811 for(i = s->nb_globals; i < s->nb_temps; i++) {
1812 ts = &s->temps[i];
1813 if (ts->temp_local) {
1814 temp_save(s, i, allocated_regs);
1815 } else {
1816 #ifdef USE_LIVENESS_ANALYSIS
1817 /* The liveness analysis already ensures that temps are dead.
1818 Keep an assert for safety. */
1819 assert(ts->val_type == TEMP_VAL_DEAD);
1820 #else
1821 temp_dead(s, i);
1822 #endif
1826 save_globals(s, allocated_regs);
/* Test bit n of the per-op dead-args / sync-args masks computed by the
   liveness pass; both macros rely on local variables 'dead_args' and
   'sync_args' being in scope at the use site. */
1829 #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1830 #define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1)
1832 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
1833 uint16_t dead_args, uint8_t sync_args)
1835 TCGTemp *ots;
1836 tcg_target_ulong val;
1838 ots = &s->temps[args[0]];
1839 val = args[1];
1841 if (ots->fixed_reg) {
1842 /* for fixed registers, we do not do any constant
1843 propagation */
1844 tcg_out_movi(s, ots->type, ots->reg, val);
1845 } else {
1846 /* The movi is not explicitly generated here */
1847 if (ots->val_type == TEMP_VAL_REG)
1848 s->reg_to_temp[ots->reg] = -1;
1849 ots->val_type = TEMP_VAL_CONST;
1850 ots->val = val;
1852 if (NEED_SYNC_ARG(0)) {
1853 temp_sync(s, args[0], s->reserved_regs);
1855 if (IS_DEAD_ARG(0)) {
1856 temp_dead(s, args[0]);
/* Register allocation for a mov op: depending on source/destination
   state this becomes a store, a constant propagation, a register
   rename, or an actual register-to-register move. */
1860 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
1861 const TCGArg *args, uint16_t dead_args,
1862 uint8_t sync_args)
1864 TCGRegSet allocated_regs;
1865 TCGTemp *ts, *ots;
1866 TCGType otype, itype;
1868 tcg_regset_set(allocated_regs, s->reserved_regs);
1869 ots = &s->temps[args[0]];
1870 ts = &s->temps[args[1]];
1872 /* Note that otype != itype for no-op truncation. */
1873 otype = ots->type;
1874 itype = ts->type;
1876 /* If the source value is not in a register, and we're going to be
1877 forced to have it in a register in order to perform the copy,
1878 then copy the SOURCE value into its own register first. That way
1879 we don't have to reload SOURCE the next time it is used. */
1880 if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
1881 || ts->val_type == TEMP_VAL_MEM) {
1882 ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[itype],
1883 allocated_regs);
1884 if (ts->val_type == TEMP_VAL_MEM) {
1885 tcg_out_ld(s, itype, ts->reg, ts->mem_reg, ts->mem_offset);
1886 ts->mem_coherent = 1;
1887 } else if (ts->val_type == TEMP_VAL_CONST) {
1888 tcg_out_movi(s, itype, ts->reg, ts->val);
1890 s->reg_to_temp[ts->reg] = args[1];
1891 ts->val_type = TEMP_VAL_REG;
/* Case 1: destination dies immediately -- the mov degenerates to a
   store of the source into the destination's memory slot. */
1894 if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
1895 /* mov to a non-saved dead register makes no sense (even with
1896 liveness analysis disabled). */
1897 assert(NEED_SYNC_ARG(0));
1898 /* The code above should have moved the temp to a register. */
1899 assert(ts->val_type == TEMP_VAL_REG);
1900 if (!ots->mem_allocated) {
1901 temp_allocate_frame(s, args[0]);
1903 tcg_out_st(s, otype, ts->reg, ots->mem_reg, ots->mem_offset);
1904 if (IS_DEAD_ARG(1)) {
1905 temp_dead(s, args[1]);
1907 temp_dead(s, args[0]);
1908 } else if (ts->val_type == TEMP_VAL_CONST) {
1909 /* propagate constant */
1910 if (ots->val_type == TEMP_VAL_REG) {
1911 s->reg_to_temp[ots->reg] = -1;
1913 ots->val_type = TEMP_VAL_CONST;
1914 ots->val = ts->val;
1915 } else {
1916 /* The code in the first if block should have moved the
1917 temp to a register. */
1918 assert(ts->val_type == TEMP_VAL_REG);
/* Case 3a: source dies here -- simply transfer ownership of the
   source register to the destination (no code emitted). */
1919 if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
1920 /* the mov can be suppressed */
1921 if (ots->val_type == TEMP_VAL_REG) {
1922 s->reg_to_temp[ots->reg] = -1;
1924 ots->reg = ts->reg;
1925 temp_dead(s, args[1]);
1926 } else {
1927 if (ots->val_type != TEMP_VAL_REG) {
1928 /* When allocating a new register, make sure to not spill the
1929 input one. */
1930 tcg_regset_set_reg(allocated_regs, ts->reg);
1931 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
1932 allocated_regs);
1934 tcg_out_mov(s, otype, ots->reg, ts->reg);
1936 ots->val_type = TEMP_VAL_REG;
1937 ots->mem_coherent = 0;
1938 s->reg_to_temp[ots->reg] = args[0];
1939 if (NEED_SYNC_ARG(0)) {
1940 tcg_reg_sync(s, ots->reg);
/* Generic register allocation for one TCG op: satisfy the input
   constraints, free dead inputs, handle clobbers/side effects, satisfy
   the output constraints, emit the target instruction, and finally
   sync/kill the outputs as requested by the liveness masks. */
1945 static void tcg_reg_alloc_op(TCGContext *s,
1946 const TCGOpDef *def, TCGOpcode opc,
1947 const TCGArg *args, uint16_t dead_args,
1948 uint8_t sync_args)
1950 TCGRegSet allocated_regs;
1951 int i, k, nb_iargs, nb_oargs, reg;
1952 TCGArg arg;
1953 const TCGArgConstraint *arg_ct;
1954 TCGTemp *ts;
1955 TCGArg new_args[TCG_MAX_OP_ARGS];
1956 int const_args[TCG_MAX_OP_ARGS];
1958 nb_oargs = def->nb_oargs;
1959 nb_iargs = def->nb_iargs;
1961 /* copy constants */
1962 memcpy(new_args + nb_oargs + nb_iargs,
1963 args + nb_oargs + nb_iargs,
1964 sizeof(TCGArg) * def->nb_cargs);
1966 /* satisfy input constraints */
1967 tcg_regset_set(allocated_regs, s->reserved_regs);
/* Inputs are processed in the constraint-sorted order so the most
   restrictive constraints get first pick of registers. */
1968 for(k = 0; k < nb_iargs; k++) {
1969 i = def->sorted_args[nb_oargs + k];
1970 arg = args[i];
1971 arg_ct = &def->args_ct[i];
1972 ts = &s->temps[arg];
1973 if (ts->val_type == TEMP_VAL_MEM) {
1974 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1975 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1976 ts->val_type = TEMP_VAL_REG;
1977 ts->reg = reg;
1978 ts->mem_coherent = 1;
1979 s->reg_to_temp[reg] = arg;
1980 } else if (ts->val_type == TEMP_VAL_CONST) {
1981 if (tcg_target_const_match(ts->val, ts->type, arg_ct)) {
1982 /* constant is OK for instruction */
1983 const_args[i] = 1;
1984 new_args[i] = ts->val;
1985 goto iarg_end;
1986 } else {
1987 /* need to move to a register */
1988 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1989 tcg_out_movi(s, ts->type, reg, ts->val);
1990 ts->val_type = TEMP_VAL_REG;
1991 ts->reg = reg;
1992 ts->mem_coherent = 0;
1993 s->reg_to_temp[reg] = arg;
1996 assert(ts->val_type == TEMP_VAL_REG);
1997 if (arg_ct->ct & TCG_CT_IALIAS) {
1998 if (ts->fixed_reg) {
1999 /* if fixed register, we must allocate a new register
2000 if the alias is not the same register */
2001 if (arg != args[arg_ct->alias_index])
2002 goto allocate_in_reg;
2003 } else {
2004 /* if the input is aliased to an output and if it is
2005 not dead after the instruction, we must allocate
2006 a new register and move it */
2007 if (!IS_DEAD_ARG(i)) {
2008 goto allocate_in_reg;
2012 reg = ts->reg;
2013 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2014 /* nothing to do : the constraint is satisfied */
2015 } else {
2016 allocate_in_reg:
2017 /* allocate a new register matching the constraint
2018 and move the temporary register into it */
2019 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2020 tcg_out_mov(s, ts->type, reg, ts->reg);
2022 new_args[i] = reg;
2023 const_args[i] = 0;
2024 tcg_regset_set_reg(allocated_regs, reg);
2025 iarg_end: ;
2028 /* mark dead temporaries and free the associated registers */
2029 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2030 if (IS_DEAD_ARG(i)) {
2031 temp_dead(s, args[i]);
2035 if (def->flags & TCG_OPF_BB_END) {
2036 tcg_reg_alloc_bb_end(s, allocated_regs);
2037 } else {
2038 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2039 /* XXX: permit generic clobber register list ? */
2040 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2041 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2042 tcg_reg_free(s, reg);
2046 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2047 /* sync globals if the op has side effects and might trigger
2048 an exception. */
2049 sync_globals(s, allocated_regs);
2052 /* satisfy the output constraints */
2053 tcg_regset_set(allocated_regs, s->reserved_regs);
2054 for(k = 0; k < nb_oargs; k++) {
2055 i = def->sorted_args[k];
2056 arg = args[i];
2057 arg_ct = &def->args_ct[i];
2058 ts = &s->temps[arg];
2059 if (arg_ct->ct & TCG_CT_ALIAS) {
2060 reg = new_args[arg_ct->alias_index];
2061 } else {
2062 /* if fixed register, we try to use it */
2063 reg = ts->reg;
2064 if (ts->fixed_reg &&
2065 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2066 goto oarg_end;
2068 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2070 tcg_regset_set_reg(allocated_regs, reg);
2071 /* if a fixed register is used, then a move will be done afterwards */
2072 if (!ts->fixed_reg) {
2073 if (ts->val_type == TEMP_VAL_REG) {
2074 s->reg_to_temp[ts->reg] = -1;
2076 ts->val_type = TEMP_VAL_REG;
2077 ts->reg = reg;
2078 /* temp value is modified, so the value kept in memory is
2079 potentially not the same */
2080 ts->mem_coherent = 0;
2081 s->reg_to_temp[reg] = arg;
2083 oarg_end:
2084 new_args[i] = reg;
2088 /* emit instruction */
2089 tcg_out_op(s, opc, new_args, const_args);
2091 /* move the outputs in the correct register if needed */
2092 for(i = 0; i < nb_oargs; i++) {
2093 ts = &s->temps[args[i]];
2094 reg = new_args[i];
2095 if (ts->fixed_reg && ts->reg != reg) {
2096 tcg_out_mov(s, ts->type, ts->reg, reg);
2098 if (NEED_SYNC_ARG(i)) {
2099 tcg_reg_sync(s, reg);
2101 if (IS_DEAD_ARG(i)) {
2102 temp_dead(s, args[i]);
/* Direction-of-stack-growth helper for laying out outgoing call
   arguments: negates offsets on targets whose stack grows upward. */
2107 #ifdef TCG_TARGET_STACK_GROWSUP
2108 #define STACK_DIR(x) (-(x))
2109 #else
2110 #define STACK_DIR(x) (x)
2111 #endif
/* Register allocation for a call op: place arguments in stack slots
   and argument registers, free dead inputs, clobber the call-clobbered
   registers, save/sync globals per the call flags, emit the call, and
   bind the result registers to the output temps.  Returns the number
   of argument words consumed from the parameter stream. */
2113 static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
2114 TCGOpcode opc, const TCGArg *args,
2115 uint16_t dead_args, uint8_t sync_args)
2117 int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
2118 TCGArg arg;
2119 TCGTemp *ts;
2120 intptr_t stack_offset;
2121 size_t call_stack_size;
2122 tcg_insn_unit *func_addr;
2123 int allocate_args;
2124 TCGRegSet allocated_regs;
/* The first parameter word packs the output count (high 16 bits) and
   input count (low 16 bits). */
2126 arg = *args++;
2128 nb_oargs = arg >> 16;
2129 nb_iargs = arg & 0xffff;
2130 nb_params = nb_iargs;
2132 func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
2133 flags = args[nb_oargs + nb_iargs + 1];
2135 nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2136 if (nb_regs > nb_params) {
2137 nb_regs = nb_params;
2140 /* assign stack slots first */
2141 call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
2142 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
2143 ~(TCG_TARGET_STACK_ALIGN - 1);
2144 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
2145 if (allocate_args) {
2146 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
2147 preallocate call stack */
2148 tcg_abort();
2151 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
/* Arguments beyond the register-passed ones are stored to the
   (statically reserved) outgoing-argument stack area. */
2152 for(i = nb_regs; i < nb_params; i++) {
2153 arg = args[nb_oargs + i];
2154 #ifdef TCG_TARGET_STACK_GROWSUP
2155 stack_offset -= sizeof(tcg_target_long);
2156 #endif
2157 if (arg != TCG_CALL_DUMMY_ARG) {
2158 ts = &s->temps[arg];
2159 if (ts->val_type == TEMP_VAL_REG) {
2160 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
2161 } else if (ts->val_type == TEMP_VAL_MEM) {
2162 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2163 s->reserved_regs);
2164 /* XXX: not correct if reading values from the stack */
2165 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2166 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2167 } else if (ts->val_type == TEMP_VAL_CONST) {
2168 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2169 s->reserved_regs);
2170 /* XXX: sign extend may be needed on some targets */
2171 tcg_out_movi(s, ts->type, reg, ts->val);
2172 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2173 } else {
2174 tcg_abort();
2177 #ifndef TCG_TARGET_STACK_GROWSUP
2178 stack_offset += sizeof(tcg_target_long);
2179 #endif
2182 /* assign input registers */
2183 tcg_regset_set(allocated_regs, s->reserved_regs);
2184 for(i = 0; i < nb_regs; i++) {
2185 arg = args[nb_oargs + i];
2186 if (arg != TCG_CALL_DUMMY_ARG) {
2187 ts = &s->temps[arg];
2188 reg = tcg_target_call_iarg_regs[i];
/* Evict whatever currently occupies the ABI argument register.  */
2189 tcg_reg_free(s, reg);
2190 if (ts->val_type == TEMP_VAL_REG) {
2191 if (ts->reg != reg) {
2192 tcg_out_mov(s, ts->type, reg, ts->reg);
2194 } else if (ts->val_type == TEMP_VAL_MEM) {
2195 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2196 } else if (ts->val_type == TEMP_VAL_CONST) {
2197 /* XXX: sign extend ? */
2198 tcg_out_movi(s, ts->type, reg, ts->val);
2199 } else {
2200 tcg_abort();
2202 tcg_regset_set_reg(allocated_regs, reg);
2206 /* mark dead temporaries and free the associated registers */
2207 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2208 if (IS_DEAD_ARG(i)) {
2209 temp_dead(s, args[i]);
2213 /* clobber call registers */
2214 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2215 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2216 tcg_reg_free(s, reg);
2220 /* Save globals if they might be written by the helper, sync them if
2221 they might be read. */
2222 if (flags & TCG_CALL_NO_READ_GLOBALS) {
2223 /* Nothing to do */
2224 } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
2225 sync_globals(s, allocated_regs);
2226 } else {
2227 save_globals(s, allocated_regs);
2230 tcg_out_call(s, func_addr);
2232 /* assign output registers and emit moves if needed */
2233 for(i = 0; i < nb_oargs; i++) {
2234 arg = args[i];
2235 ts = &s->temps[arg];
2236 reg = tcg_target_call_oarg_regs[i];
2237 assert(s->reg_to_temp[reg] == -1);
2239 if (ts->fixed_reg) {
2240 if (ts->reg != reg) {
2241 tcg_out_mov(s, ts->type, ts->reg, reg);
2243 } else {
2244 if (ts->val_type == TEMP_VAL_REG) {
2245 s->reg_to_temp[ts->reg] = -1;
2247 ts->val_type = TEMP_VAL_REG;
2248 ts->reg = reg;
2249 ts->mem_coherent = 0;
2250 s->reg_to_temp[reg] = arg;
2251 if (NEED_SYNC_ARG(i)) {
2252 tcg_reg_sync(s, reg);
2254 if (IS_DEAD_ARG(i)) {
2255 temp_dead(s, args[i]);
2260 return nb_iargs + nb_oargs + def->nb_cargs + 1;
2263 #ifdef CONFIG_PROFILER
/* Per-opcode execution counters, incremented in the code-generation
   loop; dumped by tcg_dump_op_count below. */
2265 static int64_t tcg_table_op_count[NB_OPS];
2267 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2269 int i;
2271 for(i = INDEX_op_end; i < NB_OPS; i++) {
2272 cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
2273 tcg_table_op_count[i]);
2276 #else
/* Non-profiler stub with the same external interface. */
2277 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2279 cpu_fprintf(f, "[TCG profiler not compiled]\n");
2281 #endif
/* Main code-generation driver: optionally optimize, run liveness
   analysis, then walk the opcode stream dispatching each op to the
   register allocator / target backend.  When search_pc >= 0, stop and
   return the index of the op that generated the host code at that
   offset; otherwise return -1 after emitting the whole TB. */
2284 static inline int tcg_gen_code_common(TCGContext *s,
2285 tcg_insn_unit *gen_code_buf,
2286 long search_pc)
2288 TCGOpcode opc;
2289 int op_index;
2290 const TCGOpDef *def;
2291 const TCGArg *args;
2293 #ifdef DEBUG_DISAS
2294 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2295 qemu_log("OP:\n");
2296 tcg_dump_ops(s);
2297 qemu_log("\n");
2299 #endif
2301 #ifdef CONFIG_PROFILER
2302 s->opt_time -= profile_getclock();
2303 #endif
2305 #ifdef USE_TCG_OPTIMIZATIONS
2306 s->gen_opparam_ptr =
2307 tcg_optimize(s, s->gen_opc_ptr, s->gen_opparam_buf, tcg_op_defs);
2308 #endif
2310 #ifdef CONFIG_PROFILER
2311 s->opt_time += profile_getclock();
2312 s->la_time -= profile_getclock();
2313 #endif
2315 tcg_liveness_analysis(s);
2317 #ifdef CONFIG_PROFILER
2318 s->la_time += profile_getclock();
2319 #endif
2321 #ifdef DEBUG_DISAS
2322 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
2323 qemu_log("OP after optimization and liveness analysis:\n");
2324 tcg_dump_ops(s);
2325 qemu_log("\n");
2327 #endif
2329 tcg_reg_alloc_start(s);
2331 s->code_buf = gen_code_buf;
2332 s->code_ptr = gen_code_buf;
2334 tcg_out_tb_init(s);
/* Forward walk over ops; 'args' advances in lock-step through the
   parameter buffer (call and nopn consume a variable number). */
2336 args = s->gen_opparam_buf;
2337 op_index = 0;
2339 for(;;) {
2340 opc = s->gen_opc_buf[op_index];
2341 #ifdef CONFIG_PROFILER
2342 tcg_table_op_count[opc]++;
2343 #endif
2344 def = &tcg_op_defs[opc];
2345 #if 0
2346 printf("%s: %d %d %d\n", def->name,
2347 def->nb_oargs, def->nb_iargs, def->nb_cargs);
2348 // dump_regs(s);
2349 #endif
2350 switch(opc) {
2351 case INDEX_op_mov_i32:
2352 case INDEX_op_mov_i64:
2353 tcg_reg_alloc_mov(s, def, args, s->op_dead_args[op_index],
2354 s->op_sync_args[op_index]);
2355 break;
2356 case INDEX_op_movi_i32:
2357 case INDEX_op_movi_i64:
2358 tcg_reg_alloc_movi(s, args, s->op_dead_args[op_index],
2359 s->op_sync_args[op_index]);
2360 break;
2361 case INDEX_op_debug_insn_start:
2362 /* debug instruction */
2363 break;
2364 case INDEX_op_nop:
2365 case INDEX_op_nop1:
2366 case INDEX_op_nop2:
2367 case INDEX_op_nop3:
2368 break;
2369 case INDEX_op_nopn:
2370 args += args[0];
2371 goto next;
2372 case INDEX_op_discard:
2373 temp_dead(s, args[0]);
2374 break;
2375 case INDEX_op_set_label:
2376 tcg_reg_alloc_bb_end(s, s->reserved_regs);
2377 tcg_out_label(s, args[0], s->code_ptr);
2378 break;
2379 case INDEX_op_call:
2380 args += tcg_reg_alloc_call(s, def, opc, args,
2381 s->op_dead_args[op_index],
2382 s->op_sync_args[op_index]);
2383 goto next;
2384 case INDEX_op_end:
2385 goto the_end;
2386 default:
2387 /* Sanity check that we've not introduced any unhandled opcodes. */
2388 if (def->flags & TCG_OPF_NOT_PRESENT) {
2389 tcg_abort();
2391 /* Note: in order to speed up the code, it would be much
2392 faster to have specialized register allocator functions for
2393 some common argument patterns */
2394 tcg_reg_alloc_op(s, def, opc, args, s->op_dead_args[op_index],
2395 s->op_sync_args[op_index]);
2396 break;
2398 args += def->nb_args;
2399 next:
2400 if (search_pc >= 0 && search_pc < tcg_current_code_size(s)) {
2401 return op_index;
2403 op_index++;
2404 #ifndef NDEBUG
2405 check_regs(s);
2406 #endif
2408 the_end:
2409 /* Generate TB finalization at the end of block */
2410 tcg_out_tb_finalize(s);
2411 return -1;
2414 int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
2416 #ifdef CONFIG_PROFILER
2418 int n;
2419 n = (s->gen_opc_ptr - s->gen_opc_buf);
2420 s->op_count += n;
2421 if (n > s->op_count_max)
2422 s->op_count_max = n;
2424 s->temp_count += s->nb_temps;
2425 if (s->nb_temps > s->temp_count_max)
2426 s->temp_count_max = s->nb_temps;
2428 #endif
2430 tcg_gen_code_common(s, gen_code_buf, -1);
2432 /* flush instruction cache */
2433 flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
2435 return tcg_current_code_size(s);
/* Return the index of the micro operation such as the pc after is <
   offset bytes from the start of the TB. The contents of gen_code_buf must
   not be changed, though writing the same values is ok.
   Return -1 if not found. */
int tcg_gen_code_search_pc(TCGContext *s, tcg_insn_unit *gen_code_buf,
                           long offset)
{
    /* Re-run code generation; a non-negative search_pc argument makes
       tcg_gen_code_common() stop and return the current op index as soon
       as the generated code size exceeds 'offset'.  */
    return tcg_gen_code_common(s, gen_code_buf, offset);
}
#ifdef CONFIG_PROFILER
/* Dump the statistics accumulated by the TCG profiler (TB/op counts and
   cycle breakdown between the translation phases) to F via CPU_FPRINTF.  */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGContext *s = &tcg_ctx;
    int64_t tot;

    /* Total JIT cycles: intermediate-op generation plus host code gen.  */
    tot = s->interm_time + s->code_time;
    /* NOTE(review): the seconds figure hard-codes a 2.4 GHz host clock,
       as the message itself admits.  */
    cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    /* tb_count1 - tb_count is reported as the number of aborted TBs.  */
    cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                s->tb_count,
                s->tb_count1 - s->tb_count,
                s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0);
    cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                s->tb_count ?
                (double)s->del_op_count / s->tb_count : 0);
    cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
                s->tb_count ?
                (double)s->temp_count / s->tb_count : 0,
                s->temp_count_max);

    cpu_fprintf(f, "cycles/op %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    /* Avoid division by zero in the percentage breakdown below.  */
    if (tot == 0)
        tot = 1;
    cpu_fprintf(f, " gen_interm time %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, " gen_code time %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, " avg cycles %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
#else
/* Profiler not compiled in; keep the monitor command functional.  */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
   put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
   the contents of a .debug_frame section that describes the post-
   prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
/* These layouts and symbol names are an ABI contract with GDB's JIT
   interface; do not rename, reorder, or change any of them.  */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

/* One registered in-memory symbol file (doubly-linked list node).  */
struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;   /* start of the in-memory ELF image */
    uint64_t symfile_size;
};

/* Global registry root that the debugger inspects.  */
struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;       /* a jit_actions_t value */
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

/* GDB sets a breakpoint in this well-known function; the empty asm
   keeps the call from being optimized away (per the GDB JIT docs).  */
void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface.  */
/* Return the offset of STR within the NUL-separated string table STRTAB
   (whose first byte is the conventional leading NUL).  STR must be
   present: there is no terminating condition for a miss.  */
static int find_string(const char *strtab, const char *str)
{
    const char *entry;

    /* Skip the empty string at offset 0, then walk entry by entry.  */
    for (entry = strtab + 1; ; entry += strlen(entry) + 1) {
        if (strcmp(entry, str) == 0) {
            return entry - strtab;
        }
    }
}
/* Build a fake in-memory ELF image describing the code_gen_buffer at
   BUF_PTR/BUF_SIZE (with the backend-supplied .debug_frame appended)
   and register it with GDB through the __jit_debug_* interface above.  */
static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    /* Minimal hand-rolled DWARF .debug_info: one compile-unit DIE plus
       one subprogram DIE covering the whole buffer.  The field order
       must match the abbreviation table 'da' below.  */
    struct __attribute__((packed)) DebugInfo {
        uint32_t len;          /* CU length, not counting this field */
        uint16_t version;      /* DWARF version */
        uint32_t abbrev;       /* offset into .debug_abbrev */
        uint8_t ptr_size;
        uint8_t cu_die;        /* abbrev code 1: the compile unit */
        uint16_t cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t fn_die;        /* abbrev code 2: the function */
        char fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t cu_eoc;        /* end-of-children terminator */
    };

    /* Complete layout of the fake ELF file; the .debug_frame payload is
       copied immediately after this struct (see shdr[4].sh_offset).  */
    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym) sym[2];
        struct DebugInfo di;
        uint8_t da[24];        /* .debug_abbrev contents */
        char str[80];          /* shared .strtab / .shstrtab */
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            /* NOTE(review): this takes sizeof(Shdr), not sizeof(Ehdr);
               readers appear not to check e_ehsize, but confirm before
               relying on this image elsewhere.  */
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            },
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            },
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    /* Patch in everything that depends on the actual buffer address.  */
    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    /* Append the backend's .debug_frame and point its FDE at the
       code buffer.  */
    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            /* NOTE(review): fwrite returns the item count (0 or 1 here),
               so comparing against img_size is not a real error check;
               the empty body exists only to consume the return value.  */
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite. */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    /* Publish the entry and fire the GDB notification hook.  */
    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature. Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    /* Intentionally empty: no ELF_HOST_MACHINE, so no GDB JIT images.  */
}

void tcg_register_jit(void *buf, size_t buf_size)
{
    /* Intentionally empty: no ELF_HOST_MACHINE, so nothing to register.  */
}
#endif /* ELF_HOST_MACHINE */