/* qemu.git: tcg/tcg.c */

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_LIVENESS_ANALYSIS
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB.  */
#undef DEBUG_JIT

#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS
#include "cpu.h"

#include "tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"

/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
static void tcg_out_tb_init(TCGContext *s);
static bool tcg_out_tb_finalize(TCGContext *s);

static TCGRegSet tcg_target_available_regs[2];
static TCGRegSet tcg_target_call_clobber_regs;
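
/* Helpers to emit instruction units into the code buffer, splitting wider
   values into tcg_insn_unit pieces as needed, plus matching tcg_patch*
   helpers to rewrite already-emitted units in place. */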
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place.  The caller may not have
           written the initial value.  */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = &tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    *l = (TCGLabel){
        .id = s->nb_labels++
    };

    return l;
}

#include "tcg-target.inc.c"

/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];

void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    GHashTable *helper_table;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    s->helpers = helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }
}
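
/* Generate the host prologue at the start of code_gen_buffer, then shrink
   the buffer so translated code is placed after it. */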
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->code_gen_prologue = buf0;

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);
    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size = s->code_gen_buffer_size - prologue_size;
    s->code_gen_buffer_size = total_size;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        log_disas(buf0, prologue_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
}

void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    s->gen_first_op_idx = 0;
    s->gen_last_op_idx = -1;
    s->gen_next_op_idx = 0;
    s->gen_next_parm_idx = 0;

    s->be = tcg_malloc(sizeof(TCGBackendData));
}
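
/* Low-level bookkeeping for TCG temporaries: map a TCGTemp back to its
   index, and allocate new temp and global slots. */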
static inline int temp_idx(TCGContext *s, TCGTemp *ts)
{
    ptrdiff_t n = ts - s->temps;
    tcg_debug_assert(n >= 0 && n < s->nb_temps);
    return n;
}

static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    return tcg_temp_alloc(s);
}

static int tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                       TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return temp_idx(s, ts);
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    int idx;
    s->frame_start = start;
    s->frame_end = start + size;
    idx = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
    s->frame_temp = &s->temps[idx];
}

TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name)
{
    TCGContext *s = &tcg_ctx;
    int idx;

    if (tcg_regset_test_reg(s->reserved_regs, reg)) {
        tcg_abort();
    }
    idx = tcg_global_reg_new_internal(s, TCG_TYPE_I32, reg, name);
    return MAKE_TCGV_I32(idx);
}

TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name)
{
    TCGContext *s = &tcg_ctx;
    int idx;

    if (tcg_regset_test_reg(s->reserved_regs, reg)) {
        tcg_abort();
    }
    idx = tcg_global_reg_new_internal(s, TCG_TYPE_I64, reg, name);
    return MAKE_TCGV_I64(idx);
}

int tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                intptr_t offset, const char *name)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *base_ts = &s->temps[GET_TCGV_PTR(base)];
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        indirect_reg = 1;
        base_ts->indirect_base = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return temp_idx(s, ts);
}

static int tcg_temp_new_internal(TCGType type, int temp_local)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
        idx = temp_idx(s, ts);
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return idx;
}

TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
{
    int idx;

    idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
    return MAKE_TCGV_I32(idx);
}

TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
{
    int idx;

    idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
    return MAKE_TCGV_I64(idx);
}

static void tcg_temp_free_internal(int idx)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int k;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(idx >= s->nb_globals && idx < s->nb_temps);
    ts = &s->temps[idx];
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}

void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(GET_TCGV_I32(arg));
}

void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(GET_TCGV_I64(arg));
}

TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.  */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif

/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
                   int nargs, TCGArg *args)
{
    int i, real_args, nb_rets, pi, pi_first;
    unsigned sizemask, flags;
    TCGHelperInfo *info;

    info = g_hash_table_lookup(s->helpers, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;

    TCGV_UNUSED_I64(retl);
    TCGV_UNUSED_I64(reth);
    if (sizemask != 0) {
        TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2);
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = GET_TCGV_I32(h);
                split_args[real_args++] = GET_TCGV_I32(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = GET_TCGV_I64(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    pi_first = pi = s->gen_next_parm_idx;
    if (ret != TCG_CALL_DUMMY_ARG) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            s->gen_opparam_buf[pi++] = GET_TCGV_I64(reth);
            s->gen_opparam_buf[pi++] = GET_TCGV_I64(retl);
            nb_rets = 2;
        } else {
            s->gen_opparam_buf[pi++] = ret;
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            s->gen_opparam_buf[pi++] = ret + 1;
            s->gen_opparam_buf[pi++] = ret;
#else
            s->gen_opparam_buf[pi++] = ret;
            s->gen_opparam_buf[pi++] = ret + 1;
#endif
            nb_rets = 2;
        } else {
            s->gen_opparam_buf[pi++] = ret;
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                s->gen_opparam_buf[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            s->gen_opparam_buf[pi++] = args[i] + 1;
            s->gen_opparam_buf[pi++] = args[i];
#else
            s->gen_opparam_buf[pi++] = args[i];
            s->gen_opparam_buf[pi++] = args[i] + 1;
#endif
            real_args += 2;
            continue;
        }

        s->gen_opparam_buf[pi++] = args[i];
        real_args++;
    }
    s->gen_opparam_buf[pi++] = (uintptr_t)func;
    s->gen_opparam_buf[pi++] = flags;

    i = s->gen_next_op_idx;
    tcg_debug_assert(i < OPC_BUF_SIZE);
    tcg_debug_assert(pi <= OPPARAM_BUF_SIZE);

    /* Set links for sequential allocation during translation.  */
    s->gen_op_buf[i] = (TCGOp){
        .opc = INDEX_op_call,
        .callo = nb_rets,
        .calli = real_args,
        .args = pi_first,
        .prev = i - 1,
        .next = i + 1
    };

    /* Make sure the calli field didn't overflow.  */
    tcg_debug_assert(s->gen_op_buf[i].calli == real_args);

    s->gen_last_op_idx = i;
    s->gen_next_op_idx = i + 1;
    s->gen_next_parm_idx = pi;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
            TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
            tcg_temp_free_i32(h);
            tcg_temp_free_i32(l);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
            tcg_temp_free_i64(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
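
/* Reset the register allocator state: globals start in their fixed
   register or in memory, local temps in memory, other temps dead. */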
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i;
    TCGTemp *ts;
    for(i = 0; i < s->nb_globals; i++) {
        ts = &s->temps[i];
        if (ts->fixed_reg) {
            ts->val_type = TEMP_VAL_REG;
        } else {
            ts->val_type = TEMP_VAL_MEM;
        }
    }
    for(i = s->nb_globals; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        if (ts->temp_local) {
            ts->val_type = TEMP_VAL_MEM;
        } else {
            ts->val_type = TEMP_VAL_DEAD;
        }
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}

static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(s, ts);

    if (idx < s->nb_globals) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str_idx(TCGContext *s, char *buf,
                                 int buf_size, int idx)
{
    tcg_debug_assert(idx >= 0 && idx < s->nb_temps);
    return tcg_get_arg_str_ptr(s, buf, buf_size, &s->temps[idx]);
}

/* Find helper name.  */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (s->helpers) {
        TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}

static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};
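
/* Dump the current opcode stream to the qemu log in a readable form. */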
void tcg_dump_ops(TCGContext *s)
{
    char buf[128];
    TCGOp *op;
    int oi;

    for (oi = s->gen_first_op_idx; oi >= 0; oi = op->next) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        const TCGArg *args;
        TCGOpcode c;

        op = &s->gen_op_buf[oi];
        c = op->opc;
        def = &tcg_op_defs[c];
        args = &s->gen_opparam_buf[op->args];

        if (c == INDEX_op_insn_start) {
            qemu_log("%s ----", oi != s->gen_first_op_idx ? "\n" : "");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
#else
                a = args[i];
#endif
                qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = op->callo;
            nb_iargs = op->calli;
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                     tcg_find_helper(s, args[nb_oargs + nb_iargs]),
                     args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                    args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
                }
                qemu_log(",%s", t);
            }
        } else {
            qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    qemu_log(",");
                }
                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                   args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    qemu_log(",");
                }
                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                   args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
                if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
                    qemu_log(",%s", cond_name[args[k++]]);
                } else {
                    qemu_log(",$0x%" TCG_PRIlx, args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al = "", *s_op;
                        if (op & MO_AMASK) {
                            if ((op & MO_AMASK) == MO_ALIGN) {
                                s_al = "al+";
                            } else {
                                s_al = "un+";
                            }
                        }
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                qemu_log("%s$L%d", k ? "," : "", arg_label(args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", args[k]);
            }
        }
        qemu_log("\n");
    }
}

/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;

    int i, n;
    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG))
            return 0;
        n = 0;
        for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i))
                n++;
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}

/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for(i = 0; i < n; i++)
        def->sorted_args[start + i] = start + i;
    if (n <= 1)
        return;
    for(i = 0; i < n - 1; i++) {
        for(j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}

void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
{
    TCGOpcode op;
    TCGOpDef *def;
    const char *ct_str;
    int i, nb_args;

    for(;;) {
        if (tdefs->op == (TCGOpcode)-1)
            break;
        op = tdefs->op;
        tcg_debug_assert((unsigned)op < NB_OPS);
        def = &tcg_op_defs[op];
#if defined(CONFIG_DEBUG_TCG)
        /* Duplicate entry in op definitions? */
        tcg_debug_assert(!def->used);
        def->used = 1;
#endif
        nb_args = def->nb_iargs + def->nb_oargs;
        for(i = 0; i < nb_args; i++) {
            ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry? */
            tcg_debug_assert(ct_str != NULL);
            tcg_regset_clear(def->args_ct[i].u.regs);
            def->args_ct[i].ct = 0;
            if (ct_str[0] >= '0' && ct_str[0] <= '9') {
                int oarg;
                oarg = ct_str[0] - '0';
                tcg_debug_assert(oarg < def->nb_oargs);
                tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                /* TCG_CT_ALIAS is for the output arguments.  The input
                   argument is tagged with TCG_CT_IALIAS. */
                def->args_ct[i] = def->args_ct[oarg];
                def->args_ct[oarg].ct = TCG_CT_ALIAS;
                def->args_ct[oarg].alias_index = i;
                def->args_ct[i].ct |= TCG_CT_IALIAS;
                def->args_ct[i].alias_index = oarg;
            } else {
                for(;;) {
                    if (*ct_str == '\0')
                        break;
                    switch(*ct_str) {
                    case 'i':
                        def->args_ct[i].ct |= TCG_CT_CONST;
                        ct_str++;
                        break;
                    default:
                        if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
                            fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
                                    ct_str, i, def->name);
                            exit(1);
                        }
                    }
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);

#if 0
        {
            int i;

            printf("%s: sorted=", def->name);
            for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
                printf(" %d", def->sorted_args[i]);
            printf("\n");
        }
#endif
        tdefs++;
    }

#if defined(CONFIG_DEBUG_TCG)
    i = 0;
    for (op = 0; op < tcg_op_defs_max; op++) {
        const TCGOpDef *def = &tcg_op_defs[op];
        if (def->flags & TCG_OPF_NOT_PRESENT) {
            /* Wrong entry in op definitions? */
            if (def->used) {
                fprintf(stderr, "Invalid op definition for %s\n", def->name);
                i = 1;
            }
        } else {
            /* Missing entry in op definitions? */
            if (!def->used) {
                fprintf(stderr, "Missing op definition for %s\n", def->name);
                i = 1;
            }
        }
    }
    if (i == 1) {
        tcg_abort();
    }
#endif
}
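
/* Unlink an op from the doubly linked op list and poison its storage. */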
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    int next = op->next;
    int prev = op->prev;

    if (next >= 0) {
        s->gen_op_buf[next].prev = prev;
    } else {
        s->gen_last_op_idx = prev;
    }
    if (prev >= 0) {
        s->gen_op_buf[prev].next = next;
    } else {
        s->gen_first_op_idx = next;
    }

    memset(op, -1, sizeof(*op));

#ifdef CONFIG_PROFILER
    s->del_op_count++;
#endif
}

#ifdef USE_LIVENESS_ANALYSIS
/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps,
                                   uint8_t *mem_temps)
{
    memset(dead_temps, 1, s->nb_temps);
    memset(mem_temps, 1, s->nb_globals);
    memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals);
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
                                 uint8_t *mem_temps)
{
    int i;

    memset(dead_temps, 1, s->nb_temps);
    memset(mem_temps, 1, s->nb_globals);
    for(i = s->nb_globals; i < s->nb_temps; i++) {
        mem_temps[i] = s->temps[i].temp_local;
    }
}

/* Liveness analysis: update the opc_dead_args array to tell whether a
   given input argument is dead. Instructions updating dead
   temporaries are removed. */
static void tcg_liveness_analysis(TCGContext *s)
{
    uint8_t *dead_temps, *mem_temps;
    int oi, oi_prev, nb_ops;

    nb_ops = s->gen_next_op_idx;
    s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
    s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));

    dead_temps = tcg_malloc(s->nb_temps);
    mem_temps = tcg_malloc(s->nb_temps);
    tcg_la_func_end(s, dead_temps, mem_temps);

    for (oi = s->gen_last_op_idx; oi >= 0; oi = oi_prev) {
        int i, nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        uint16_t dead_args;
        uint8_t sync_args;
        TCGArg arg;

        TCGOp * const op = &s->gen_op_buf[oi];
        TCGArg * const args = &s->gen_opparam_buf[op->args];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        oi_prev = op->prev;

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;

                nb_oargs = op->callo;
                nb_iargs = op->calli;
                call_flags = args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        arg = args[i];
                        if (!dead_temps[arg] || mem_temps[arg]) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                } else {
                do_not_remove_call:

                    /* output args are dead */
                    dead_args = 0;
                    sync_args = 0;
                    for (i = 0; i < nb_oargs; i++) {
                        arg = args[i];
                        if (dead_temps[arg]) {
                            dead_args |= (1 << i);
                        }
                        if (mem_temps[arg]) {
                            sync_args |= (1 << i);
                        }
                        dead_temps[arg] = 1;
                        mem_temps[arg] = 0;
                    }

                    if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                        /* globals should be synced to memory */
                        memset(mem_temps, 1, s->nb_globals);
                    }
                    if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                        TCG_CALL_NO_READ_GLOBALS))) {
                        /* globals should go back to memory */
                        memset(dead_temps, 1, s->nb_globals);
                    }

                    /* record arguments that die in this helper */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg = args[i];
                        if (arg != TCG_CALL_DUMMY_ARG) {
                            if (dead_temps[arg]) {
                                dead_args |= (1 << i);
                            }
                        }
                    }
                    /* input arguments are live for preceding opcodes */
                    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                        arg = args[i];
                        dead_temps[arg] = 0;
                    }
                    s->op_dead_args[oi] = dead_args;
                    s->op_sync_args[oi] = sync_args;
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            dead_temps[args[0]] = 1;
            mem_temps[args[0]] = 0;
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit.  */
            if (dead_temps[args[1]] && !mem_temps[args[1]]) {
                if (dead_temps[args[0]] && !mem_temps[args[0]]) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end.  */
                op->opc = opc = opc_new;
                args[1] = args[2];
                args[2] = args[4];
                /* Fall through and mark the single-word operation live.  */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (dead_temps[args[1]] && !mem_temps[args[1]]) {
                if (dead_temps[args[0]] && !mem_temps[args[0]]) {
                    /* Both parts of the operation are dead.  */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                args[1] = args[2];
                args[2] = args[3];
            } else if (have_opc_new2 && dead_temps[args[0]]
                       && !mem_temps[args[0]]) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                args[0] = args[1];
                args[1] = args[2];
                args[2] = args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live.  */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    arg = args[i];
                    if (!dead_temps[arg] || mem_temps[arg]) {
                        goto do_not_remove;
                    }
                }
            do_remove:
                tcg_op_remove(s, op);
            } else {
            do_not_remove:
                /* output args are dead */
                dead_args = 0;
                sync_args = 0;
                for (i = 0; i < nb_oargs; i++) {
                    arg = args[i];
                    if (dead_temps[arg]) {
                        dead_args |= (1 << i);
                    }
                    if (mem_temps[arg]) {
                        sync_args |= (1 << i);
                    }
                    dead_temps[arg] = 1;
                    mem_temps[arg] = 0;
                }

                /* if end of basic block, update */
                if (def->flags & TCG_OPF_BB_END) {
                    tcg_la_bb_end(s, dead_temps, mem_temps);
                } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                    /* globals should be synced to memory */
                    memset(mem_temps, 1, s->nb_globals);
                }

                /* record arguments that die in this opcode */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg = args[i];
                    if (dead_temps[arg]) {
                        dead_args |= (1 << i);
                    }
                }
                /* input arguments are live for preceding opcodes */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg = args[i];
                    dead_temps[arg] = 0;
                }
                s->op_dead_args[oi] = dead_args;
                s->op_sync_args[oi] = sync_args;
            }
            break;
        }
    }
}
#else
/* dummy liveness analysis */
static void tcg_liveness_analysis(TCGContext *s)
{
    int nb_ops = s->gen_next_op_idx;

    s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
    memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
    s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
    memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t));
}
#endif
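
/* Debug-only consistency helpers: dump_regs prints the location of every
   temp and the temp bound to each host register; check_regs verifies that
   reg_to_temp[] and the temps agree. */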
#ifdef CONFIG_DEBUG_TCG
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for(i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
        switch(ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}

static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif
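
/* Reserve a stack-frame slot for the given temp, keeping the frame offset
   aligned; aborts if the frame overflows. */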
static void temp_allocate_frame(TCGContext *s, int temp)
{
    TCGTemp *ts;
    ts = &s->temps[temp];
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}

static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);

/* sync register 'reg' by saving it to the corresponding temporary */
static void tcg_reg_sync(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (!ts->mem_coherent && !ts->fixed_reg) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, temp_idx(s, ts));
        } else if (ts->indirect_reg) {
            tcg_regset_set_reg(allocated_regs, ts->reg);
            temp_load(s, ts->mem_base,
                      tcg_target_available_regs[TCG_TYPE_PTR],
                      allocated_regs);
        }
        tcg_out_st(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
    }
    ts->mem_coherent = 1;
}

/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];

    if (ts != NULL) {
        tcg_reg_sync(s, reg, allocated_regs);
        ts->val_type = TEMP_VAL_MEM;
        s->reg_to_temp[reg] = NULL;
    }
}

/* Allocate a register belonging to reg1 & ~reg2 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
                            TCGRegSet allocated_regs, bool rev)
{
    int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    const int *order;
    TCGReg reg;
    TCGRegSet reg_ct;

    tcg_regset_andnot(reg_ct, desired_regs, allocated_regs);
    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* first try free registers */
    for(i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
            return reg;
    }

    /* XXX: do better spill choice */
    for(i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg)) {
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        }
    }

    tcg_abort();
}

/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        tcg_out_movi(s, ts->type, reg, ts->val);
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        if (ts->indirect_reg) {
            tcg_regset_set_reg(allocated_regs, reg);
            temp_load(s, ts->mem_base,
                      tcg_target_available_regs[TCG_TYPE_PTR],
                      allocated_regs);
        }
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}

/* mark a temporary as dead. */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    if (ts->fixed_reg) {
        return;
    }
    if (ts->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ts->reg] = NULL;
    }
    ts->val_type = (temp_idx(s, ts) < s->nb_globals || ts->temp_local
                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
}

/* sync a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    if (ts->fixed_reg) {
        return;
    }
    switch (ts->val_type) {
    case TEMP_VAL_CONST:
        temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs);
        /* fallthrough */
    case TEMP_VAL_REG:
        tcg_reg_sync(s, ts->reg, allocated_regs);
        break;
    case TEMP_VAL_DEAD:
    case TEMP_VAL_MEM:
        break;
    default:
        tcg_abort();
    }
}

/* save a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static inline void temp_save(TCGContext *s, TCGTemp *ts,
                             TCGRegSet allocated_regs)
{
#ifdef USE_LIVENESS_ANALYSIS
    /* ??? Liveness does not yet incorporate indirect bases.  */
    if (!ts->indirect_base) {
        /* The liveness analysis already ensures that globals are back
           in memory. Keep a tcg_debug_assert for safety. */
        tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
        return;
    }
#endif
    temp_sync(s, ts, allocated_regs);
    temp_dead(s, ts);
}

/* save globals to their canonical location and assume they can be
   modified by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = 0; i < s->nb_globals; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}

/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = 0; i < s->nb_globals; i++) {
        TCGTemp *ts = &s->temps[i];
#ifdef USE_LIVENESS_ANALYSIS
        /* ??? Liveness does not yet incorporate indirect bases.  */
        if (!ts->indirect_base) {
            tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                             || ts->fixed_reg
                             || ts->mem_coherent);
            continue;
        }
#endif
        temp_sync(s, ts, allocated_regs);
    }
}

/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->temp_local) {
            temp_save(s, ts, allocated_regs);
        } else {
#ifdef USE_LIVENESS_ANALYSIS
            /* ??? Liveness does not yet incorporate indirect bases.  */
            if (!ts->indirect_base) {
                /* The liveness analysis already ensures that temps are dead.
                   Keep a tcg_debug_assert for safety. */
                tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
                continue;
            }
#endif
            temp_dead(s, ts);
        }
    }

    save_globals(s, allocated_regs);
}

#define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
#define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1)

static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
                               uint16_t dead_args, uint8_t sync_args)
{
    TCGTemp *ots;
    tcg_target_ulong val;

    ots = &s->temps[args[0]];
    val = args[1];

    if (ots->fixed_reg) {
        /* for fixed registers, we do not do any constant
           propagation */
        tcg_out_movi(s, ots->type, ots->reg, val);
    } else {
        /* The movi is not explicitly generated here */
        if (ots->val_type == TEMP_VAL_REG) {
            s->reg_to_temp[ots->reg] = NULL;
        }
        ots->val_type = TEMP_VAL_CONST;
        ots->val = val;
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}

static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
                              const TCGArg *args, uint16_t dead_args,
                              uint8_t sync_args)
{
    TCGRegSet allocated_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    tcg_regset_set(allocated_regs, s->reserved_regs);
    ots = &s->temps[args[0]];
    ts = &s->temps[args[1]];

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    /* If the source value is not in a register, and we're going to be
       forced to have it in a register in order to perform the copy,
       then copy the SOURCE value into its own register first.  That way
       we don't have to reload SOURCE the next time it is used. */
    if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
        || ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
    }

    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        /* The code above should have moved the temp to a register. */
        tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, args[0]);
        }
        if (ots->indirect_reg) {
            tcg_regset_set_reg(allocated_regs, ts->reg);
            temp_load(s, ots->mem_base,
                      tcg_target_available_regs[TCG_TYPE_PTR],
                      allocated_regs);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant */
        if (ots->val_type == TEMP_VAL_REG) {
            s->reg_to_temp[ots->reg] = NULL;
        }
        ots->val_type = TEMP_VAL_CONST;
        ots->val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
    } else {
        /* The code in the first if block should have moved the
           temp to a register. */
        tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, ots->indirect_base);
            }
            tcg_out_mov(s, otype, ots->reg, ts->reg);
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            tcg_reg_sync(s, ots->reg, allocated_regs);
        }
    }
}
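
/* Register allocation for a generic op: load inputs to satisfy their
   constraints, place the outputs, then emit the host instruction. */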
static void tcg_reg_alloc_op(TCGContext *s,
                             const TCGOpDef *def, TCGOpcode opc,
                             const TCGArg *args, uint16_t dead_args,
                             uint8_t sync_args)
{
    TCGRegSet allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    /* satisfy input constraints */
    tcg_regset_set(allocated_regs, s->reserved_regs);
    for(k = 0; k < nb_iargs; k++) {
        i = def->sorted_args[nb_oargs + k];
        arg = args[i];
        arg_ct = &def->args_ct[i];
        ts = &s->temps[arg];

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            goto iarg_end;
        }

        temp_load(s, ts, arg_ct->u.regs, allocated_regs);

        if (arg_ct->ct & TCG_CT_IALIAS) {
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != args[arg_ct->alias_index])
                    goto allocate_in_reg;
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }
                /* check if the current register has already been allocated
                   for another input aliased to an output */
                int k2, i2;
                for (k2 = 0 ; k2 < k ; k2++) {
                    i2 = def->sorted_args[nb_oargs + k2];
                    if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
                        (new_args[i2] == ts->reg)) {
                        goto allocate_in_reg;
                    }
                }
            }
        }
        reg = ts->reg;
        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
                                ts->indirect_base);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(allocated_regs, reg);
    iarg_end: ;
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, &s->temps[args[i]]);
        }
    }

    if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, allocated_regs);
        }

        /* satisfy the output constraints */
        tcg_regset_set(allocated_regs, s->reserved_regs);
        for(k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = args[i];
            arg_ct = &def->args_ct[i];
            ts = &s->temps[arg];
            if (arg_ct->ct & TCG_CT_ALIAS) {
                reg = new_args[arg_ct->alias_index];
            } else {
                /* if fixed register, we try to use it */
                reg = ts->reg;
                if (ts->fixed_reg &&
                    tcg_regset_test_reg(arg_ct->u.regs, reg)) {
                    goto oarg_end;
                }
                reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
                                    ts->indirect_base);
            }
            tcg_regset_set_reg(allocated_regs, reg);
            /* if a fixed register is used, then a move will be done afterwards */
            if (!ts->fixed_reg) {
                if (ts->val_type == TEMP_VAL_REG) {
                    s->reg_to_temp[ts->reg] = NULL;
                }
                ts->val_type = TEMP_VAL_REG;
                ts->reg = reg;
                /* temp value is modified, so the value kept in memory is
                   potentially not the same */
                ts->mem_coherent = 0;
                s->reg_to_temp[reg] = ts;
            }
        oarg_end:
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    tcg_out_op(s, opc, new_args, const_args);

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = &s->temps[args[i]];
        reg = new_args[i];
        if (ts->fixed_reg && ts->reg != reg) {
            tcg_out_mov(s, ts->type, ts->reg, reg);
        }
        if (NEED_SYNC_ARG(i)) {
            tcg_reg_sync(s, reg, allocated_regs);
        }
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif
static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
                               const TCGArg * const args, uint16_t dead_args,
                               uint8_t sync_args)
{
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
    flags = args[nb_oargs + nb_iargs + 1];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }
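
    /* Worked example (hypothetical host with an 8-byte tcg_target_long and
       TCG_TARGET_STACK_ALIGN of 16): three arguments that did not fit in
       registers need 3 * 8 = 24 bytes, which the mask above rounds up to
       32.  That is well below TCG_STATIC_CALL_ARGS_SIZE, so the space the
       prologue reserved suffices and no dynamic allocation is needed.  */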
    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    tcg_regset_set(allocated_regs, s->reserved_regs);
    for (i = 0; i < nb_regs; i++) {
        arg = args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            reg = tcg_target_call_iarg_regs[i];
            tcg_reg_free(s, reg, allocated_regs);

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_out_mov(s, ts->type, reg, ts->reg);
                }
            } else {
                TCGRegSet arg_set;

                tcg_regset_clear(arg_set);
                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, &s->temps[args[i]]);
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }
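
    /* The three cases track how much global state must hit memory:
       TCG_CALL_NO_READ_GLOBALS promises the helper touches no globals at
       all, so register copies stay valid as-is; TCG_CALL_NO_WRITE_GLOBALS
       allows reads, so globals are synced to their canonical locations but
       the register copies remain usable; otherwise the helper may write
       globals, so they are saved and the register copies discarded.  */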
    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for (i = 0; i < nb_oargs; i++) {
        arg = args[i];
        ts = &s->temps[arg];
        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);

        if (ts->fixed_reg) {
            if (ts->reg != reg) {
                tcg_out_mov(s, ts->type, ts->reg, reg);
            }
        } else {
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            if (NEED_SYNC_ARG(i)) {
                tcg_reg_sync(s, reg, allocated_regs);
            }
            if (IS_DEAD_ARG(i)) {
                temp_dead(s, ts);
            }
        }
    }
}
#ifdef CONFIG_PROFILER

static int64_t tcg_table_op_count[NB_OPS];

void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    int i;

    for (i = 0; i < NB_OPS; i++) {
        cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
                    tcg_table_op_count[i]);
    }
}
#else
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
    int i, oi, oi_next, num_insns;

#ifdef CONFIG_PROFILER
    {
        int n;

        n = s->gen_last_op_idx + 1;
        s->op_count += n;
        if (n > s->op_count_max) {
            s->op_count_max = n;
        }

        n = s->nb_temps;
        s->temp_count += n;
        if (n > s->temp_count_max) {
            s->temp_count_max = n;
        }
    }
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log("OP:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
    }
#endif

#ifdef CONFIG_PROFILER
    s->opt_time -= profile_getclock();
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif
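
    /* The optional pass above (tcg/optimize.c) performs IR-level rewrites
       such as constant folding and copy propagation; the code generation
       below works the same whether or not the pass has run.  */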
#ifdef CONFIG_PROFILER
    s->opt_time += profile_getclock();
    s->la_time -= profile_getclock();
#endif

    tcg_liveness_analysis(s);

#ifdef CONFIG_PROFILER
    s->la_time += profile_getclock();
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = tb->tc_ptr;
    s->code_ptr = tb->tc_ptr;

    tcg_out_tb_init(s);

    num_insns = -1;
    for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
        TCGOp * const op = &s->gen_op_buf[oi];
        TCGArg * const args = &s->gen_opparam_buf[op->args];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        uint16_t dead_args = s->op_dead_args[oi];
        uint8_t sync_args = s->op_sync_args[oi];

        oi_next = op->next;
#ifdef CONFIG_PROFILER
        tcg_table_op_count[opc]++;
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
            tcg_reg_alloc_mov(s, def, args, dead_args, sync_args);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
            tcg_reg_alloc_movi(s, args, dead_args, sync_args);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
#else
                a = args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
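        /* The bookkeeping above gives every guest instruction an entry in
           gen_insn_data[] (its TARGET_INSN_START_WORDS start words, e.g.
           the guest PC) and gen_insn_end_off[] (the host-code offset at
           which its code ends); the search data later built from these is
           what allows a host PC to be mapped back to a guest state.  */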
        case INDEX_op_discard:
            temp_dead(s, &s->temps[args[0]]);
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(args[0]), s->code_ptr);
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op->callo, op->calli, args,
                               dead_args, sync_args);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            if (def->flags & TCG_OPF_NOT_PRESENT) {
                tcg_abort();
            }
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, def, opc, args, dead_args, sync_args);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
    if (!tcg_out_tb_finalize(s)) {
        return -1;
    }

    /* flush instruction cache */
    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

    return tcg_current_code_size(s);
}
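
/* A sketch of the caller's contract, assuming the tb_gen_code() logic in
   translate-all.c: a negative return value means the code buffer (or its
   finalization data) crossed the high-water mark, and the caller is
   expected to flush the code cache and translate the block again;
   otherwise the return value is the number of host code bytes emitted.  */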
#ifdef CONFIG_PROFILER
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGContext *s = &tcg_ctx;
    int64_t tb_count = s->tb_count;
    int64_t tb_div_count = tb_count ? tb_count : 1;
    int64_t tot = s->interm_time + s->code_time;

    cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    cpu_fprintf(f, "avg temps/TB        %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    cpu_fprintf(f, "avg host code/TB    %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    cpu_fprintf(f, "avg search data/TB  %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    cpu_fprintf(f, "cycles/op           %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte      %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    cpu_fprintf(f, "cycles/search byte  %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time    %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time  %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, "  avg cycles        %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/
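
/* A minimal sketch of steps (2) and (3), modeled loosely on the real
   backends (see tcg/<host>/tcg-target.inc.c); the CIE field values and
   the DW_CFA_* payload below are placeholders, not a working unwind
   description for any host:

       typedef struct {
           DebugFrameHeader h;
           uint8_t fde_cfa_def[4];      -- backend-specific DW_CFA_* bytes
       } DebugFrame;

       static const DebugFrame debug_frame = {
           .h.cie.len = sizeof(DebugFrameCIE) - 4,   -- length after .len
           .h.cie.id = -1,
           .h.cie.version = 1,
           .h.cie.code_align = 1,
           .h.cie.return_column = ...,               -- host return-address reg
           .h.fde.len = sizeof(DebugFrame)
                      - offsetof(DebugFrame, h.fde.cie_offset),
           .fde_cfa_def = { ... },
       };

       void tcg_register_jit(void *buf, size_t buf_size)
       {
           tcg_register_jit_int(buf, buf_size,
                                &debug_frame, sizeof(debug_frame));
       }

   The FDE's func_start/func_len are left zero in the template;
   tcg_register_jit_int() patches them once the code_gen_buffer bounds
   are known.  */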
/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
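
/* GDB plants a breakpoint inside __jit_debug_register_code; filling in the
   descriptor and then calling the (deliberately empty, non-inlined)
   function is what lets the debugger notice and read the new symbol
   file.  */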
/* End GDB interface. */
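
/* Return the offset of 'str' within the concatenated string table
   'strtab'.  Callers must only ask for strings that are present; there is
   deliberately no bounds check, and a missing string would loop forever. */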
static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;
    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Ehdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };
    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;
#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != 1) {
                /* Silence unused-result warnings for fwrite; with a single
                   item, success is a return value of exactly 1. */
            }
            fclose(f);
        }
    }
#endif
    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(void *buf, size_t buf_size)
{
}

#endif /* ELF_HOST_MACHINE */