[qemu/ar7.git] / tcg / tcg.c
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
29 #include "config.h"
31 /* Define to dump the ELF file used to communicate with GDB. */
32 #undef DEBUG_JIT
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
36 #define NDEBUG
37 #endif
39 #include "qemu-common.h"
40 #include "qemu/cache-utils.h"
41 #include "qemu/host-utils.h"
42 #include "qemu/timer.h"
44 /* Note: the long-term plan is to reduce the dependencies on the QEMU
45 CPU definitions. Currently they are used for qemu_ld/st
46 instructions */
47 #define NO_CPU_IO_DEFS
48 #include "cpu.h"
50 #include "tcg-op.h"
52 #if UINTPTR_MAX == UINT32_MAX
53 # define ELF_CLASS ELFCLASS32
54 #else
55 # define ELF_CLASS ELFCLASS64
56 #endif
57 #ifdef HOST_WORDS_BIGENDIAN
58 # define ELF_DATA ELFDATA2MSB
59 #else
60 # define ELF_DATA ELFDATA2LSB
61 #endif
63 #include "elf.h"
65 /* Forward declarations for functions declared in tcg-target.c and used here. */
66 static void tcg_target_init(TCGContext *s);
67 static void tcg_target_qemu_prologue(TCGContext *s);
68 static void patch_reloc(uint8_t *code_ptr, int type,
69 intptr_t value, intptr_t addend);
71 /* The CIE and FDE header definitions will be common to all hosts. */
72 typedef struct {
73 uint32_t len __attribute__((aligned((sizeof(void *)))));
74 uint32_t id;
75 uint8_t version;
76 char augmentation[1];
77 uint8_t code_align;
78 uint8_t data_align;
79 uint8_t return_column;
80 } DebugFrameCIE;
82 typedef struct QEMU_PACKED {
83 uint32_t len __attribute__((aligned((sizeof(void *)))));
84 uint32_t cie_offset;
85 uintptr_t func_start;
86 uintptr_t func_len;
87 } DebugFrameFDEHeader;
89 static void tcg_register_jit_int(void *buf, size_t size,
90 void *debug_frame, size_t debug_frame_size)
91 __attribute__((unused));
93 /* Forward declarations for functions declared and used in tcg-target.c. */
94 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
95 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
96 intptr_t arg2);
97 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
98 static void tcg_out_movi(TCGContext *s, TCGType type,
99 TCGReg ret, tcg_target_long arg);
100 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
101 const int *const_args);
102 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
103 intptr_t arg2);
104 static int tcg_target_const_match(tcg_target_long val,
105 const TCGArgConstraint *arg_ct);
106 static void tcg_out_tb_init(TCGContext *s);
107 static void tcg_out_tb_finalize(TCGContext *s);
110 TCGOpDef tcg_op_defs[] = {
111 #define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
112 #include "tcg-opc.h"
113 #undef DEF
115 const size_t tcg_op_defs_max = ARRAY_SIZE(tcg_op_defs);
117 static TCGRegSet tcg_target_available_regs[2];
118 static TCGRegSet tcg_target_call_clobber_regs;
120 static inline void tcg_out8(TCGContext *s, uint8_t v)
122 *s->code_ptr++ = v;
125 static inline void tcg_out16(TCGContext *s, uint16_t v)
127 uint8_t *p = s->code_ptr;
128 *(uint16_t *)p = v;
129 s->code_ptr = p + 2;
132 static inline void tcg_out32(TCGContext *s, uint32_t v)
134 uint8_t *p = s->code_ptr;
135 *(uint32_t *)p = v;
136 s->code_ptr = p + 4;
139 static inline void tcg_out64(TCGContext *s, uint64_t v)
141 uint8_t *p = s->code_ptr;
142 *(uint64_t *)p = v;
143 s->code_ptr = p + 8;
146 /* label relocation processing */
148 static void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
149 int label_index, intptr_t addend)
151 TCGLabel *l;
152 TCGRelocation *r;
154 l = &s->labels[label_index];
155 if (l->has_value) {
156 /* FIXME: This may break relocations on RISC targets that
157 modify instruction fields in place. The caller may not have
158 written the initial value. */
159 patch_reloc(code_ptr, type, l->u.value, addend);
160 } else {
161 /* add a new relocation entry */
162 r = tcg_malloc(sizeof(TCGRelocation));
163 r->type = type;
164 r->ptr = code_ptr;
165 r->addend = addend;
166 r->next = l->u.first_reloc;
167 l->u.first_reloc = r;
171 static void tcg_out_label(TCGContext *s, int label_index, void *ptr)
173 TCGLabel *l;
174 TCGRelocation *r;
175 intptr_t value = (intptr_t)ptr;
177 l = &s->labels[label_index];
178 if (l->has_value) {
179 tcg_abort();
181 r = l->u.first_reloc;
182 while (r != NULL) {
183 patch_reloc(r->ptr, r->type, value, r->addend);
184 r = r->next;
186 l->has_value = 1;
187 l->u.value = value;
190 int gen_new_label(void)
192 TCGContext *s = &tcg_ctx;
193 int idx;
194 TCGLabel *l;
196 if (s->nb_labels >= TCG_MAX_LABELS)
197 tcg_abort();
198 idx = s->nb_labels++;
199 l = &s->labels[idx];
200 l->has_value = 0;
201 l->u.first_reloc = NULL;
202 return idx;
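/* Editor's sketch (not part of the original file): how a backend pairs the
   two halves of the relocation API above.  A branch emitted before its
   target is known records a relocation against the label; tcg_out_label()
   later patches every recorded site.  The opcode and emitter names below
   are hypothetical; R_386_PC32 stands in for a host-specific reloc type.  */
#if 0
static void example_out_forward_branch(TCGContext *s, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        /* Backward branch: the target is already known.  */
        example_emit_branch_to(s, (uintptr_t)l->u.value);   /* hypothetical */
    } else {
        /* Forward branch: emit a placeholder displacement and record a
           relocation so tcg_out_label() can patch it later.  */
        tcg_out8(s, EXAMPLE_OPC_JMP_REL32);                 /* hypothetical */
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
/* When the translator finally reaches the label:
       tcg_out_label(s, label_index, s->code_ptr);          */
#endif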
205 #include "tcg-target.c"
207 /* pool based memory allocation */
208 void *tcg_malloc_internal(TCGContext *s, int size)
210 TCGPool *p;
211 int pool_size;
213 if (size > TCG_POOL_CHUNK_SIZE) {
214 /* big malloc: insert a new pool (XXX: could optimize) */
215 p = g_malloc(sizeof(TCGPool) + size);
216 p->size = size;
217 p->next = s->pool_first_large;
218 s->pool_first_large = p;
219 return p->data;
220 } else {
221 p = s->pool_current;
222 if (!p) {
223 p = s->pool_first;
224 if (!p)
225 goto new_pool;
226 } else {
227 if (!p->next) {
228 new_pool:
229 pool_size = TCG_POOL_CHUNK_SIZE;
230 p = g_malloc(sizeof(TCGPool) + pool_size);
231 p->size = pool_size;
232 p->next = NULL;
233 if (s->pool_current)
234 s->pool_current->next = p;
235 else
236 s->pool_first = p;
237 } else {
238 p = p->next;
242 s->pool_current = p;
243 s->pool_cur = p->data + size;
244 s->pool_end = p->data + p->size;
245 return p->data;
248 void tcg_pool_reset(TCGContext *s)
250 TCGPool *p, *t;
251 for (p = s->pool_first_large; p; p = t) {
252 t = p->next;
253 g_free(p);
255 s->pool_first_large = NULL;
256 s->pool_cur = s->pool_end = NULL;
257 s->pool_current = NULL;
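/* Editor's note (not part of the original file): the fast path of this
   allocator lives in tcg.h as tcg_malloc(), reproduced approximately
   below.  It bumps pool_cur within the current chunk and only calls
   tcg_malloc_internal() above when the chunk is exhausted;
   tcg_pool_reset() then recycles all chunks at once between
   translation blocks.  */
#if 0
static inline void *example_tcg_malloc(int size)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *ptr, *ptr_end;

    size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);  /* align */
    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(&tcg_ctx, size);          /* slow path */
    }
    s->pool_cur = ptr_end;
    return ptr;
}
#endif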
260 #include "helper.h"
262 typedef struct TCGHelperInfo {
263 void *func;
264 const char *name;
265 } TCGHelperInfo;
267 static const TCGHelperInfo all_helpers[] = {
268 #define GEN_HELPER 2
269 #include "helper.h"
271 /* Include tcg-runtime.c functions. */
272 { tcg_helper_div_i32, "div_i32" },
273 { tcg_helper_rem_i32, "rem_i32" },
274 { tcg_helper_divu_i32, "divu_i32" },
275 { tcg_helper_remu_i32, "remu_i32" },
277 { tcg_helper_shl_i64, "shl_i64" },
278 { tcg_helper_shr_i64, "shr_i64" },
279 { tcg_helper_sar_i64, "sar_i64" },
280 { tcg_helper_div_i64, "div_i64" },
281 { tcg_helper_rem_i64, "rem_i64" },
282 { tcg_helper_divu_i64, "divu_i64" },
283 { tcg_helper_remu_i64, "remu_i64" },
284 { tcg_helper_mulsh_i64, "mulsh_i64" },
285 { tcg_helper_muluh_i64, "muluh_i64" },
288 void tcg_context_init(TCGContext *s)
290 int op, total_args, n, i;
291 TCGOpDef *def;
292 TCGArgConstraint *args_ct;
293 int *sorted_args;
294 GHashTable *helper_table;
296 memset(s, 0, sizeof(*s));
297 s->nb_globals = 0;
299 /* Count total number of arguments and allocate the corresponding
300 space */
301 total_args = 0;
302 for(op = 0; op < NB_OPS; op++) {
303 def = &tcg_op_defs[op];
304 n = def->nb_iargs + def->nb_oargs;
305 total_args += n;
308 args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
309 sorted_args = g_malloc(sizeof(int) * total_args);
311 for(op = 0; op < NB_OPS; op++) {
312 def = &tcg_op_defs[op];
313 def->args_ct = args_ct;
314 def->sorted_args = sorted_args;
315 n = def->nb_iargs + def->nb_oargs;
316 sorted_args += n;
317 args_ct += n;
320 /* Register helpers. */
321 /* Use g_direct_hash/equal for direct pointer comparisons on func. */
322 s->helpers = helper_table = g_hash_table_new(NULL, NULL);
324 for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
325 g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
326 (gpointer)all_helpers[i].name);
329 tcg_target_init(s);
332 void tcg_prologue_init(TCGContext *s)
334 /* init global prologue and epilogue */
335 s->code_buf = s->code_gen_prologue;
336 s->code_ptr = s->code_buf;
337 tcg_target_qemu_prologue(s);
338 flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
340 #ifdef DEBUG_DISAS
341 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
342 size_t size = s->code_ptr - s->code_buf;
343 qemu_log("PROLOGUE: [size=%zu]\n", size);
344 log_disas(s->code_buf, size);
345 qemu_log("\n");
346 qemu_log_flush();
348 #endif
351 void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size)
353 s->frame_start = start;
354 s->frame_end = start + size;
355 s->frame_reg = reg;
358 void tcg_func_start(TCGContext *s)
360 int i;
361 tcg_pool_reset(s);
362 s->nb_temps = s->nb_globals;
363 for(i = 0; i < (TCG_TYPE_COUNT * 2); i++)
364 s->first_free_temp[i] = -1;
365 s->labels = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
366 s->nb_labels = 0;
367 s->current_frame_offset = s->frame_start;
369 #ifdef CONFIG_DEBUG_TCG
370 s->goto_tb_issue_mask = 0;
371 #endif
373 s->gen_opc_ptr = s->gen_opc_buf;
374 s->gen_opparam_ptr = s->gen_opparam_buf;
376 s->be = tcg_malloc(sizeof(TCGBackendData));
379 static inline void tcg_temp_alloc(TCGContext *s, int n)
381 if (n > TCG_MAX_TEMPS)
382 tcg_abort();
385 static inline int tcg_global_reg_new_internal(TCGType type, int reg,
386 const char *name)
388 TCGContext *s = &tcg_ctx;
389 TCGTemp *ts;
390 int idx;
392 #if TCG_TARGET_REG_BITS == 32
393 if (type != TCG_TYPE_I32)
394 tcg_abort();
395 #endif
396 if (tcg_regset_test_reg(s->reserved_regs, reg))
397 tcg_abort();
398 idx = s->nb_globals;
399 tcg_temp_alloc(s, s->nb_globals + 1);
400 ts = &s->temps[s->nb_globals];
401 ts->base_type = type;
402 ts->type = type;
403 ts->fixed_reg = 1;
404 ts->reg = reg;
405 ts->name = name;
406 s->nb_globals++;
407 tcg_regset_set_reg(s->reserved_regs, reg);
408 return idx;
411 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name)
413 int idx;
415 idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name);
416 return MAKE_TCGV_I32(idx);
419 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name)
421 int idx;
423 idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name);
424 return MAKE_TCGV_I64(idx);
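/* Editor's sketch (not part of the original file): a typical front-end use
   of the fixed-register globals above, pinning the CPU state pointer that
   lives in the reserved host register TCG_AREG0.  */
#if 0
TCGv_ptr cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
#endif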
427 static inline int tcg_global_mem_new_internal(TCGType type, int reg,
428 intptr_t offset,
429 const char *name)
431 TCGContext *s = &tcg_ctx;
432 TCGTemp *ts;
433 int idx;
435 idx = s->nb_globals;
436 #if TCG_TARGET_REG_BITS == 32
437 if (type == TCG_TYPE_I64) {
438 char buf[64];
439 tcg_temp_alloc(s, s->nb_globals + 2);
440 ts = &s->temps[s->nb_globals];
441 ts->base_type = type;
442 ts->type = TCG_TYPE_I32;
443 ts->fixed_reg = 0;
444 ts->mem_allocated = 1;
445 ts->mem_reg = reg;
446 #ifdef TCG_TARGET_WORDS_BIGENDIAN
447 ts->mem_offset = offset + 4;
448 #else
449 ts->mem_offset = offset;
450 #endif
451 pstrcpy(buf, sizeof(buf), name);
452 pstrcat(buf, sizeof(buf), "_0");
453 ts->name = strdup(buf);
454 ts++;
456 ts->base_type = type;
457 ts->type = TCG_TYPE_I32;
458 ts->fixed_reg = 0;
459 ts->mem_allocated = 1;
460 ts->mem_reg = reg;
461 #ifdef TCG_TARGET_WORDS_BIGENDIAN
462 ts->mem_offset = offset;
463 #else
464 ts->mem_offset = offset + 4;
465 #endif
466 pstrcpy(buf, sizeof(buf), name);
467 pstrcat(buf, sizeof(buf), "_1");
468 ts->name = strdup(buf);
470 s->nb_globals += 2;
471 } else
472 #endif
474 tcg_temp_alloc(s, s->nb_globals + 1);
475 ts = &s->temps[s->nb_globals];
476 ts->base_type = type;
477 ts->type = type;
478 ts->fixed_reg = 0;
479 ts->mem_allocated = 1;
480 ts->mem_reg = reg;
481 ts->mem_offset = offset;
482 ts->name = name;
483 s->nb_globals++;
485 return idx;
488 TCGv_i32 tcg_global_mem_new_i32(int reg, intptr_t offset, const char *name)
490 int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
491 return MAKE_TCGV_I32(idx);
494 TCGv_i64 tcg_global_mem_new_i64(int reg, intptr_t offset, const char *name)
496 int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
497 return MAKE_TCGV_I64(idx);
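/* Editor's sketch (not part of the original file): a front end usually maps
   guest registers stored inside the CPU state structure to named globals.
   CPUFooState and its regs[] field are placeholders for a real target's
   definitions.  */
#if 0
TCGv_i32 cpu_r0 = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUFooState, regs[0]),
                                         "r0");
#endif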
500 static inline int tcg_temp_new_internal(TCGType type, int temp_local)
502 TCGContext *s = &tcg_ctx;
503 TCGTemp *ts;
504 int idx, k;
506 k = type;
507 if (temp_local)
508 k += TCG_TYPE_COUNT;
509 idx = s->first_free_temp[k];
510 if (idx != -1) {
511 /* There is already an available temp with the
512 right type */
513 ts = &s->temps[idx];
514 s->first_free_temp[k] = ts->next_free_temp;
515 ts->temp_allocated = 1;
516 assert(ts->temp_local == temp_local);
517 } else {
518 idx = s->nb_temps;
519 #if TCG_TARGET_REG_BITS == 32
520 if (type == TCG_TYPE_I64) {
521 tcg_temp_alloc(s, s->nb_temps + 2);
522 ts = &s->temps[s->nb_temps];
523 ts->base_type = type;
524 ts->type = TCG_TYPE_I32;
525 ts->temp_allocated = 1;
526 ts->temp_local = temp_local;
527 ts->name = NULL;
528 ts++;
529 ts->base_type = TCG_TYPE_I32;
530 ts->type = TCG_TYPE_I32;
531 ts->temp_allocated = 1;
532 ts->temp_local = temp_local;
533 ts->name = NULL;
534 s->nb_temps += 2;
535 } else
536 #endif
538 tcg_temp_alloc(s, s->nb_temps + 1);
539 ts = &s->temps[s->nb_temps];
540 ts->base_type = type;
541 ts->type = type;
542 ts->temp_allocated = 1;
543 ts->temp_local = temp_local;
544 ts->name = NULL;
545 s->nb_temps++;
549 #if defined(CONFIG_DEBUG_TCG)
550 s->temps_in_use++;
551 #endif
552 return idx;
555 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
557 int idx;
559 idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
560 return MAKE_TCGV_I32(idx);
563 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
565 int idx;
567 idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
568 return MAKE_TCGV_I64(idx);
571 static inline void tcg_temp_free_internal(int idx)
573 TCGContext *s = &tcg_ctx;
574 TCGTemp *ts;
575 int k;
577 #if defined(CONFIG_DEBUG_TCG)
578 s->temps_in_use--;
579 if (s->temps_in_use < 0) {
580 fprintf(stderr, "More temporaries freed than allocated!\n");
582 #endif
584 assert(idx >= s->nb_globals && idx < s->nb_temps);
585 ts = &s->temps[idx];
586 assert(ts->temp_allocated != 0);
587 ts->temp_allocated = 0;
588 k = ts->base_type;
589 if (ts->temp_local)
590 k += TCG_TYPE_COUNT;
591 ts->next_free_temp = s->first_free_temp[k];
592 s->first_free_temp[k] = idx;
595 void tcg_temp_free_i32(TCGv_i32 arg)
597 tcg_temp_free_internal(GET_TCGV_I32(arg));
600 void tcg_temp_free_i64(TCGv_i64 arg)
602 tcg_temp_free_internal(GET_TCGV_I64(arg));
605 TCGv_i32 tcg_const_i32(int32_t val)
607 TCGv_i32 t0;
608 t0 = tcg_temp_new_i32();
609 tcg_gen_movi_i32(t0, val);
610 return t0;
613 TCGv_i64 tcg_const_i64(int64_t val)
615 TCGv_i64 t0;
616 t0 = tcg_temp_new_i64();
617 tcg_gen_movi_i64(t0, val);
618 return t0;
621 TCGv_i32 tcg_const_local_i32(int32_t val)
623 TCGv_i32 t0;
624 t0 = tcg_temp_local_new_i32();
625 tcg_gen_movi_i32(t0, val);
626 return t0;
629 TCGv_i64 tcg_const_local_i64(int64_t val)
631 TCGv_i64 t0;
632 t0 = tcg_temp_local_new_i64();
633 tcg_gen_movi_i64(t0, val);
634 return t0;
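/* Editor's sketch (not part of the original file): the usual temporary
   lifecycle in a front end.  Freed indices are chained through
   first_free_temp[] above and recycled by the next allocation of the
   same type.  */
#if 0
TCGv_i32 t = tcg_temp_new_i32();
tcg_gen_movi_i32(t, 0x1234);
/* ... use t as an operand of further generated ops ... */
tcg_temp_free_i32(t);

/* tcg_const_i32() above is just this allocate+movi pair; the caller
   still owns the temporary and must free it the same way.  */
TCGv_i32 c = tcg_const_i32(42);
tcg_temp_free_i32(c);
#endif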
637 #if defined(CONFIG_DEBUG_TCG)
638 void tcg_clear_temp_count(void)
640 TCGContext *s = &tcg_ctx;
641 s->temps_in_use = 0;
644 int tcg_check_temp_count(void)
646 TCGContext *s = &tcg_ctx;
647 if (s->temps_in_use) {
648 /* Clear the count so that we don't give another
649 * warning immediately next time around.
651 s->temps_in_use = 0;
652 return 1;
654 return 0;
656 #endif
658 /* Note: we convert the 64 bit args to 32 bit and do some alignment
659 and endian swap. Maybe it would be better to do the alignment
660 and endian swap in tcg_reg_alloc_call(). */
661 void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
662 int sizemask, TCGArg ret, int nargs, TCGArg *args)
664 int i;
665 int real_args;
666 int nb_rets;
667 TCGArg *nparam;
669 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
670 for (i = 0; i < nargs; ++i) {
671 int is_64bit = sizemask & (1 << (i+1)*2);
672 int is_signed = sizemask & (2 << (i+1)*2);
673 if (!is_64bit) {
674 TCGv_i64 temp = tcg_temp_new_i64();
675 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
676 if (is_signed) {
677 tcg_gen_ext32s_i64(temp, orig);
678 } else {
679 tcg_gen_ext32u_i64(temp, orig);
681 args[i] = GET_TCGV_I64(temp);
684 #endif /* TCG_TARGET_EXTEND_ARGS */
686 *s->gen_opc_ptr++ = INDEX_op_call;
687 nparam = s->gen_opparam_ptr++;
688 if (ret != TCG_CALL_DUMMY_ARG) {
689 #if TCG_TARGET_REG_BITS < 64
690 if (sizemask & 1) {
691 #ifdef TCG_TARGET_WORDS_BIGENDIAN
692 *s->gen_opparam_ptr++ = ret + 1;
693 *s->gen_opparam_ptr++ = ret;
694 #else
695 *s->gen_opparam_ptr++ = ret;
696 *s->gen_opparam_ptr++ = ret + 1;
697 #endif
698 nb_rets = 2;
699 } else
700 #endif
702 *s->gen_opparam_ptr++ = ret;
703 nb_rets = 1;
705 } else {
706 nb_rets = 0;
708 real_args = 0;
709 for (i = 0; i < nargs; i++) {
710 #if TCG_TARGET_REG_BITS < 64
711 int is_64bit = sizemask & (1 << (i+1)*2);
712 if (is_64bit) {
713 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
714 /* some targets want aligned 64 bit args */
715 if (real_args & 1) {
716 *s->gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG;
717 real_args++;
719 #endif
720 /* If stack grows up, then we will be placing successive
721 arguments at lower addresses, which means we need to
722 reverse the order compared to how we would normally
723 treat either big or little-endian. For those arguments
724 that will wind up in registers, this still works for
725 HPPA (the only current STACK_GROWSUP target) since the
726 argument registers are *also* allocated in decreasing
727 order. If another such target is added, this logic may
728 have to get more complicated to differentiate between
729 stack arguments and register arguments. */
730 #if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
731 *s->gen_opparam_ptr++ = args[i] + 1;
732 *s->gen_opparam_ptr++ = args[i];
733 #else
734 *s->gen_opparam_ptr++ = args[i];
735 *s->gen_opparam_ptr++ = args[i] + 1;
736 #endif
737 real_args += 2;
738 continue;
740 #endif /* TCG_TARGET_REG_BITS < 64 */
742 *s->gen_opparam_ptr++ = args[i];
743 real_args++;
745 *s->gen_opparam_ptr++ = GET_TCGV_PTR(func);
747 *s->gen_opparam_ptr++ = flags;
749 *nparam = (nb_rets << 16) | (real_args + 1);
751 /* total parameters, needed to go backward in the instruction stream */
752 *s->gen_opparam_ptr++ = 1 + nb_rets + real_args + 3;
754 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
755 for (i = 0; i < nargs; ++i) {
756 int is_64bit = sizemask & (1 << (i+1)*2);
757 if (!is_64bit) {
758 TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
759 tcg_temp_free_i64(temp);
762 #endif /* TCG_TARGET_EXTEND_ARGS */
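/* Editor's sketch (not part of the original file): how callers encode
   'sizemask' (modelled on tcg_gen_sizemask() from tcg.h).  Slot 0
   describes the return value and slot i+1 describes argument i; each
   slot carries an is-64-bit bit and an is-signed bit, matching the
   (1|2) << (i+1)*2 tests above.  */
#if 0
static inline int example_sizemask(int n, int is_64bit, int is_signed)
{
    return (is_64bit << n * 2) | (is_signed << (n * 2 + 1));
}

/* A helper returning an i64 and taking one signed i32 argument:  */
int sizemask = example_sizemask(0, 1, 0)    /* 64-bit return value  */
             | example_sizemask(1, 0, 1);   /* signed 32-bit arg 0  */
#endif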
765 #if TCG_TARGET_REG_BITS == 32
766 void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
767 int c, int right, int arith)
769 if (c == 0) {
770 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
771 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
772 } else if (c >= 32) {
773 c -= 32;
774 if (right) {
775 if (arith) {
776 tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
777 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
778 } else {
779 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
780 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
782 } else {
783 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
784 tcg_gen_movi_i32(TCGV_LOW(ret), 0);
786 } else {
787 TCGv_i32 t0, t1;
789 t0 = tcg_temp_new_i32();
790 t1 = tcg_temp_new_i32();
791 if (right) {
792 tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
793 if (arith)
794 tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
795 else
796 tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
797 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
798 tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
799 tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
800 } else {
801 tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
802 /* Note: ret can be the same as arg1, so we use t1 */
803 tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
804 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
805 tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
806 tcg_gen_mov_i32(TCGV_LOW(ret), t1);
808 tcg_temp_free_i32(t0);
809 tcg_temp_free_i32(t1);
812 #endif
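/* Editor's sketch (not part of the original file): a plain-C reference
   model of the double-word left shift generated above for 32-bit hosts,
   showing the three cases (c == 0, c >= 32, 0 < c < 32).  */
#if 0
static uint64_t example_shl64(uint32_t lo, uint32_t hi, int c)
{
    uint32_t rlo, rhi;

    if (c == 0) {
        rlo = lo;
        rhi = hi;
    } else if (c >= 32) {
        rhi = lo << (c - 32);   /* low half shifts into the high half */
        rlo = 0;
    } else {
        rhi = (hi << c) | (lo >> (32 - c));  /* carry bits across halves */
        rlo = lo << c;
    }
    return ((uint64_t)rhi << 32) | rlo;
}
#endif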
814 static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
816 switch (op & MO_SIZE) {
817 case MO_8:
818 op &= ~MO_BSWAP;
819 break;
820 case MO_16:
821 break;
822 case MO_32:
823 if (!is64) {
824 op &= ~MO_SIGN;
826 break;
827 case MO_64:
828 if (!is64) {
829 tcg_abort();
831 break;
833 if (st) {
834 op &= ~MO_SIGN;
836 return op;
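/* Editor's note (not part of the original file): worked examples of the
   canonicalization above:
     - byte accesses never swap:  MO_SB | MO_BSWAP  ->  MO_SB
     - a 32-bit op has no sign to extend into:  MO_SL (is64 == 0) -> MO_UL
     - stores ignore the sign:    any MO_SIGN on a store is cleared
     - MO_64 with is64 == 0 aborts: no 64-bit value can be produced.  */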
839 static const TCGOpcode old_ld_opc[8] = {
840 [MO_UB] = INDEX_op_qemu_ld8u,
841 [MO_SB] = INDEX_op_qemu_ld8s,
842 [MO_UW] = INDEX_op_qemu_ld16u,
843 [MO_SW] = INDEX_op_qemu_ld16s,
844 #if TCG_TARGET_REG_BITS == 32
845 [MO_UL] = INDEX_op_qemu_ld32,
846 [MO_SL] = INDEX_op_qemu_ld32,
847 #else
848 [MO_UL] = INDEX_op_qemu_ld32u,
849 [MO_SL] = INDEX_op_qemu_ld32s,
850 #endif
851 [MO_Q] = INDEX_op_qemu_ld64,
854 static const TCGOpcode old_st_opc[4] = {
855 [MO_UB] = INDEX_op_qemu_st8,
856 [MO_UW] = INDEX_op_qemu_st16,
857 [MO_UL] = INDEX_op_qemu_st32,
858 [MO_Q] = INDEX_op_qemu_st64,
861 void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
863 memop = tcg_canonicalize_memop(memop, 0, 0);
865 if (TCG_TARGET_HAS_new_ldst) {
866 *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i32;
867 tcg_add_param_i32(val);
868 tcg_add_param_tl(addr);
869 *tcg_ctx.gen_opparam_ptr++ = memop;
870 *tcg_ctx.gen_opparam_ptr++ = idx;
871 return;
874 /* The old opcodes only support target-endian memory operations. */
875 assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
876 assert(old_ld_opc[memop & MO_SSIZE] != 0);
878 if (TCG_TARGET_REG_BITS == 32) {
879 *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
880 tcg_add_param_i32(val);
881 tcg_add_param_tl(addr);
882 *tcg_ctx.gen_opparam_ptr++ = idx;
883 } else {
884 TCGv_i64 val64 = tcg_temp_new_i64();
886 *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
887 tcg_add_param_i64(val64);
888 tcg_add_param_tl(addr);
889 *tcg_ctx.gen_opparam_ptr++ = idx;
891 tcg_gen_trunc_i64_i32(val, val64);
892 tcg_temp_free_i64(val64);
896 void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
898 memop = tcg_canonicalize_memop(memop, 0, 1);
900 if (TCG_TARGET_HAS_new_ldst) {
901 *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i32;
902 tcg_add_param_i32(val);
903 tcg_add_param_tl(addr);
904 *tcg_ctx.gen_opparam_ptr++ = memop;
905 *tcg_ctx.gen_opparam_ptr++ = idx;
906 return;
909 /* The old opcodes only support target-endian memory operations. */
910 assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
911 assert(old_st_opc[memop & MO_SIZE] != 0);
913 if (TCG_TARGET_REG_BITS == 32) {
914 *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
915 tcg_add_param_i32(val);
916 tcg_add_param_tl(addr);
917 *tcg_ctx.gen_opparam_ptr++ = idx;
918 } else {
919 TCGv_i64 val64 = tcg_temp_new_i64();
921 tcg_gen_extu_i32_i64(val64, val);
923 *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
924 tcg_add_param_i64(val64);
925 tcg_add_param_tl(addr);
926 *tcg_ctx.gen_opparam_ptr++ = idx;
928 tcg_temp_free_i64(val64);
932 void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
934 memop = tcg_canonicalize_memop(memop, 1, 0);
936 #if TCG_TARGET_REG_BITS == 32
937 if ((memop & MO_SIZE) < MO_64) {
938 tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
939 if (memop & MO_SIGN) {
940 tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
941 } else {
942 tcg_gen_movi_i32(TCGV_HIGH(val), 0);
944 return;
946 #endif
948 if (TCG_TARGET_HAS_new_ldst) {
949 *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i64;
950 tcg_add_param_i64(val);
951 tcg_add_param_tl(addr);
952 *tcg_ctx.gen_opparam_ptr++ = memop;
953 *tcg_ctx.gen_opparam_ptr++ = idx;
954 return;
957 /* The old opcodes only support target-endian memory operations. */
958 assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
959 assert(old_ld_opc[memop & MO_SSIZE] != 0);
961 *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
962 tcg_add_param_i64(val);
963 tcg_add_param_tl(addr);
964 *tcg_ctx.gen_opparam_ptr++ = idx;
967 void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
969 memop = tcg_canonicalize_memop(memop, 1, 1);
971 #if TCG_TARGET_REG_BITS == 32
972 if ((memop & MO_SIZE) < MO_64) {
973 tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
974 return;
976 #endif
978 if (TCG_TARGET_HAS_new_ldst) {
979 *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i64;
980 tcg_add_param_i64(val);
981 tcg_add_param_tl(addr);
982 *tcg_ctx.gen_opparam_ptr++ = memop;
983 *tcg_ctx.gen_opparam_ptr++ = idx;
984 return;
987 /* The old opcodes only support target-endian memory operations. */
988 assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
989 assert(old_st_opc[memop & MO_SIZE] != 0);
991 *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
992 tcg_add_param_i64(val);
993 tcg_add_param_tl(addr);
994 *tcg_ctx.gen_opparam_ptr++ = idx;
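/* Editor's sketch (not part of the original file): typical front-end calls
   into the accessors above -- a little-endian 32-bit load and a big-endian
   16-bit store.  'val', 'addr' and 'mem_idx' (the softmmu TLB index) are
   placeholders supplied by the translator.  */
#if 0
tcg_gen_qemu_ld_i32(val, addr, mem_idx, MO_LEUL);
tcg_gen_qemu_st_i32(val, addr, mem_idx, MO_BEUW);
#endif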
997 static void tcg_reg_alloc_start(TCGContext *s)
999 int i;
1000 TCGTemp *ts;
1001 for(i = 0; i < s->nb_globals; i++) {
1002 ts = &s->temps[i];
1003 if (ts->fixed_reg) {
1004 ts->val_type = TEMP_VAL_REG;
1005 } else {
1006 ts->val_type = TEMP_VAL_MEM;
1009 for(i = s->nb_globals; i < s->nb_temps; i++) {
1010 ts = &s->temps[i];
1011 if (ts->temp_local) {
1012 ts->val_type = TEMP_VAL_MEM;
1013 } else {
1014 ts->val_type = TEMP_VAL_DEAD;
1016 ts->mem_allocated = 0;
1017 ts->fixed_reg = 0;
1019 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1020 s->reg_to_temp[i] = -1;
1024 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
1025 int idx)
1027 TCGTemp *ts;
1029 assert(idx >= 0 && idx < s->nb_temps);
1030 ts = &s->temps[idx];
1031 if (idx < s->nb_globals) {
1032 pstrcpy(buf, buf_size, ts->name);
1033 } else {
1034 if (ts->temp_local)
1035 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
1036 else
1037 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
1039 return buf;
1042 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg)
1044 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg));
1047 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
1049 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
1052 /* Find helper name. */
1053 static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
1055 const char *ret = NULL;
1056 if (s->helpers) {
1057 ret = g_hash_table_lookup(s->helpers, (gpointer)val);
1059 return ret;
1062 static const char * const cond_name[] =
1064 [TCG_COND_NEVER] = "never",
1065 [TCG_COND_ALWAYS] = "always",
1066 [TCG_COND_EQ] = "eq",
1067 [TCG_COND_NE] = "ne",
1068 [TCG_COND_LT] = "lt",
1069 [TCG_COND_GE] = "ge",
1070 [TCG_COND_LE] = "le",
1071 [TCG_COND_GT] = "gt",
1072 [TCG_COND_LTU] = "ltu",
1073 [TCG_COND_GEU] = "geu",
1074 [TCG_COND_LEU] = "leu",
1075 [TCG_COND_GTU] = "gtu"
1078 static const char * const ldst_name[] =
1080 [MO_UB] = "ub",
1081 [MO_SB] = "sb",
1082 [MO_LEUW] = "leuw",
1083 [MO_LESW] = "lesw",
1084 [MO_LEUL] = "leul",
1085 [MO_LESL] = "lesl",
1086 [MO_LEQ] = "leq",
1087 [MO_BEUW] = "beuw",
1088 [MO_BESW] = "besw",
1089 [MO_BEUL] = "beul",
1090 [MO_BESL] = "besl",
1091 [MO_BEQ] = "beq",
1094 void tcg_dump_ops(TCGContext *s)
1096 const uint16_t *opc_ptr;
1097 const TCGArg *args;
1098 TCGArg arg;
1099 TCGOpcode c;
1100 int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn;
1101 const TCGOpDef *def;
1102 char buf[128];
1104 first_insn = 1;
1105 opc_ptr = s->gen_opc_buf;
1106 args = s->gen_opparam_buf;
1107 while (opc_ptr < s->gen_opc_ptr) {
1108 c = *opc_ptr++;
1109 def = &tcg_op_defs[c];
1110 if (c == INDEX_op_debug_insn_start) {
1111 uint64_t pc;
1112 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
1113 pc = ((uint64_t)args[1] << 32) | args[0];
1114 #else
1115 pc = args[0];
1116 #endif
1117 if (!first_insn) {
1118 qemu_log("\n");
1120 qemu_log(" ---- 0x%" PRIx64, pc);
1121 first_insn = 0;
1122 nb_oargs = def->nb_oargs;
1123 nb_iargs = def->nb_iargs;
1124 nb_cargs = def->nb_cargs;
1125 } else if (c == INDEX_op_call) {
1126 TCGArg arg;
1128 /* variable number of arguments */
1129 arg = *args++;
1130 nb_oargs = arg >> 16;
1131 nb_iargs = arg & 0xffff;
1132 nb_cargs = def->nb_cargs;
1134 qemu_log(" %s ", def->name);
1136 /* function name */
1137 qemu_log("%s",
1138 tcg_get_arg_str_idx(s, buf, sizeof(buf),
1139 args[nb_oargs + nb_iargs - 1]));
1140 /* flags */
1141 qemu_log(",$0x%" TCG_PRIlx, args[nb_oargs + nb_iargs]);
1142 /* nb out args */
1143 qemu_log(",$%d", nb_oargs);
1144 for(i = 0; i < nb_oargs; i++) {
1145 qemu_log(",");
1146 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1147 args[i]));
1149 for(i = 0; i < (nb_iargs - 1); i++) {
1150 qemu_log(",");
1151 if (args[nb_oargs + i] == TCG_CALL_DUMMY_ARG) {
1152 qemu_log("<dummy>");
1153 } else {
1154 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1155 args[nb_oargs + i]));
1158 } else if (c == INDEX_op_movi_i32 || c == INDEX_op_movi_i64) {
1159 tcg_target_ulong val;
1160 const char *name;
1162 nb_oargs = def->nb_oargs;
1163 nb_iargs = def->nb_iargs;
1164 nb_cargs = def->nb_cargs;
1165 qemu_log(" %s %s,$", def->name,
1166 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0]));
1167 val = args[1];
1168 name = tcg_find_helper(s, val);
1169 if (name) {
1170 qemu_log("%s", name);
1171 } else {
1172 if (c == INDEX_op_movi_i32) {
1173 qemu_log("0x%x", (uint32_t)val);
1174 } else {
1175 qemu_log("0x%" PRIx64 , (uint64_t)val);
1178 } else {
1179 qemu_log(" %s ", def->name);
1180 if (c == INDEX_op_nopn) {
1181 /* variable number of arguments */
1182 nb_cargs = *args;
1183 nb_oargs = 0;
1184 nb_iargs = 0;
1185 } else {
1186 nb_oargs = def->nb_oargs;
1187 nb_iargs = def->nb_iargs;
1188 nb_cargs = def->nb_cargs;
1191 k = 0;
1192 for(i = 0; i < nb_oargs; i++) {
1193 if (k != 0) {
1194 qemu_log(",");
1196 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1197 args[k++]));
1199 for(i = 0; i < nb_iargs; i++) {
1200 if (k != 0) {
1201 qemu_log(",");
1203 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1204 args[k++]));
1206 switch (c) {
1207 case INDEX_op_brcond_i32:
1208 case INDEX_op_setcond_i32:
1209 case INDEX_op_movcond_i32:
1210 case INDEX_op_brcond2_i32:
1211 case INDEX_op_setcond2_i32:
1212 case INDEX_op_brcond_i64:
1213 case INDEX_op_setcond_i64:
1214 case INDEX_op_movcond_i64:
1215 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
1216 qemu_log(",%s", cond_name[args[k++]]);
1217 } else {
1218 qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1220 i = 1;
1221 break;
1222 case INDEX_op_qemu_ld_i32:
1223 case INDEX_op_qemu_st_i32:
1224 case INDEX_op_qemu_ld_i64:
1225 case INDEX_op_qemu_st_i64:
1226 if (args[k] < ARRAY_SIZE(ldst_name) && ldst_name[args[k]]) {
1227 qemu_log(",%s", ldst_name[args[k++]]);
1228 } else {
1229 qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1231 i = 1;
1232 break;
1233 default:
1234 i = 0;
1235 break;
1237 for(; i < nb_cargs; i++) {
1238 if (k != 0) {
1239 qemu_log(",");
1241 arg = args[k++];
1242 qemu_log("$0x%" TCG_PRIlx, arg);
1245 qemu_log("\n");
1246 args += nb_iargs + nb_oargs + nb_cargs;
1250 /* we give more priority to constraints with fewer registers */
1251 static int get_constraint_priority(const TCGOpDef *def, int k)
1253 const TCGArgConstraint *arg_ct;
1255 int i, n;
1256 arg_ct = &def->args_ct[k];
1257 if (arg_ct->ct & TCG_CT_ALIAS) {
1258 /* an alias is equivalent to a single register */
1259 n = 1;
1260 } else {
1261 if (!(arg_ct->ct & TCG_CT_REG))
1262 return 0;
1263 n = 0;
1264 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1265 if (tcg_regset_test_reg(arg_ct->u.regs, i))
1266 n++;
1269 return TCG_TARGET_NB_REGS - n + 1;
1272 /* sort from highest priority to lowest */
1273 static void sort_constraints(TCGOpDef *def, int start, int n)
1275 int i, j, p1, p2, tmp;
1277 for(i = 0; i < n; i++)
1278 def->sorted_args[start + i] = start + i;
1279 if (n <= 1)
1280 return;
1281 for(i = 0; i < n - 1; i++) {
1282 for(j = i + 1; j < n; j++) {
1283 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1284 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1285 if (p1 < p2) {
1286 tmp = def->sorted_args[start + i];
1287 def->sorted_args[start + i] = def->sorted_args[start + j];
1288 def->sorted_args[start + j] = tmp;
1294 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
1296 TCGOpcode op;
1297 TCGOpDef *def;
1298 const char *ct_str;
1299 int i, nb_args;
1301 for(;;) {
1302 if (tdefs->op == (TCGOpcode)-1)
1303 break;
1304 op = tdefs->op;
1305 assert((unsigned)op < NB_OPS);
1306 def = &tcg_op_defs[op];
1307 #if defined(CONFIG_DEBUG_TCG)
1308 /* Duplicate entry in op definitions? */
1309 assert(!def->used);
1310 def->used = 1;
1311 #endif
1312 nb_args = def->nb_iargs + def->nb_oargs;
1313 for(i = 0; i < nb_args; i++) {
1314 ct_str = tdefs->args_ct_str[i];
1315 /* Incomplete TCGTargetOpDef entry? */
1316 assert(ct_str != NULL);
1317 tcg_regset_clear(def->args_ct[i].u.regs);
1318 def->args_ct[i].ct = 0;
1319 if (ct_str[0] >= '0' && ct_str[0] <= '9') {
1320 int oarg;
1321 oarg = ct_str[0] - '0';
1322 assert(oarg < def->nb_oargs);
1323 assert(def->args_ct[oarg].ct & TCG_CT_REG);
1324 /* TCG_CT_ALIAS is for the output arguments. The input
1325 argument is tagged with TCG_CT_IALIAS. */
1326 def->args_ct[i] = def->args_ct[oarg];
1327 def->args_ct[oarg].ct = TCG_CT_ALIAS;
1328 def->args_ct[oarg].alias_index = i;
1329 def->args_ct[i].ct |= TCG_CT_IALIAS;
1330 def->args_ct[i].alias_index = oarg;
1331 } else {
1332 for(;;) {
1333 if (*ct_str == '\0')
1334 break;
1335 switch(*ct_str) {
1336 case 'i':
1337 def->args_ct[i].ct |= TCG_CT_CONST;
1338 ct_str++;
1339 break;
1340 default:
1341 if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
1342 fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1343 ct_str, i, def->name);
1344 exit(1);
1351 /* TCGTargetOpDef entry with too much information? */
1352 assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
1354 /* sort the constraints (XXX: this is just a heuristic) */
1355 sort_constraints(def, 0, def->nb_oargs);
1356 sort_constraints(def, def->nb_oargs, def->nb_iargs);
1358 #if 0
1360 int i;
1362 printf("%s: sorted=", def->name);
1363 for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
1364 printf(" %d", def->sorted_args[i]);
1365 printf("\n");
1367 #endif
1368 tdefs++;
1371 #if defined(CONFIG_DEBUG_TCG)
1372 i = 0;
1373 for (op = 0; op < ARRAY_SIZE(tcg_op_defs); op++) {
1374 const TCGOpDef *def = &tcg_op_defs[op];
1375 if (def->flags & TCG_OPF_NOT_PRESENT) {
1376 /* Wrong entry in op definitions? */
1377 if (def->used) {
1378 fprintf(stderr, "Invalid op definition for %s\n", def->name);
1379 i = 1;
1381 } else {
1382 /* Missing entry in op definitions? */
1383 if (!def->used) {
1384 fprintf(stderr, "Missing op definition for %s\n", def->name);
1385 i = 1;
1389 if (i == 1) {
1390 tcg_abort();
1392 #endif
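/* Editor's sketch (not part of the original file): the shape of the table a
   backend passes to this function.  Each entry lists one constraint string
   per output, then per input: 'r' for any register, 'i' for an immediate,
   and a digit to alias an input to the numbered output, as parsed above.
   The table ends with an op of -1.  */
#if 0
static const TCGTargetOpDef example_op_defs[] = {
    { INDEX_op_add_i32, { "r", "0", "ri" } },  /* input 0 aliases output */
    { INDEX_op_ld_i32,  { "r", "r" } },
    { -1 },
};
/* called from the backend's tcg_target_init():
       tcg_add_target_add_op_defs(example_op_defs);          */
#endif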
1395 #ifdef USE_LIVENESS_ANALYSIS
1397 /* set a nop for an operation using 'nb_args' */
1398 static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr,
1399 TCGArg *args, int nb_args)
1401 if (nb_args == 0) {
1402 *opc_ptr = INDEX_op_nop;
1403 } else {
1404 *opc_ptr = INDEX_op_nopn;
1405 args[0] = nb_args;
1406 args[nb_args - 1] = nb_args;
1410 /* liveness analysis: end of function: all temps are dead, and globals
1411 should be in memory. */
1412 static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps,
1413 uint8_t *mem_temps)
1415 memset(dead_temps, 1, s->nb_temps);
1416 memset(mem_temps, 1, s->nb_globals);
1417 memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals);
1420 /* liveness analysis: end of basic block: all temps are dead, globals
1421 and local temps should be in memory. */
1422 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
1423 uint8_t *mem_temps)
1425 int i;
1427 memset(dead_temps, 1, s->nb_temps);
1428 memset(mem_temps, 1, s->nb_globals);
1429 for(i = s->nb_globals; i < s->nb_temps; i++) {
1430 mem_temps[i] = s->temps[i].temp_local;
1434 /* Liveness analysis: update the op_dead_args array to tell if a
1435 given input argument is dead. Instructions updating dead
1436 temporaries are removed. */
1437 static void tcg_liveness_analysis(TCGContext *s)
1439 int i, op_index, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
1440 TCGOpcode op, op_new, op_new2;
1441 TCGArg *args;
1442 const TCGOpDef *def;
1443 uint8_t *dead_temps, *mem_temps;
1444 uint16_t dead_args;
1445 uint8_t sync_args;
1446 bool have_op_new2;
1448 s->gen_opc_ptr++; /* skip end */
1450 nb_ops = s->gen_opc_ptr - s->gen_opc_buf;
1452 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1453 s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1455 dead_temps = tcg_malloc(s->nb_temps);
1456 mem_temps = tcg_malloc(s->nb_temps);
1457 tcg_la_func_end(s, dead_temps, mem_temps);
1459 args = s->gen_opparam_ptr;
1460 op_index = nb_ops - 1;
1461 while (op_index >= 0) {
1462 op = s->gen_opc_buf[op_index];
1463 def = &tcg_op_defs[op];
1464 switch(op) {
1465 case INDEX_op_call:
1467 int call_flags;
1469 nb_args = args[-1];
1470 args -= nb_args;
1471 nb_iargs = args[0] & 0xffff;
1472 nb_oargs = args[0] >> 16;
1473 args++;
1474 call_flags = args[nb_oargs + nb_iargs];
1476 /* pure functions can be removed if their result is not
1477 used */
1478 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
1479 for(i = 0; i < nb_oargs; i++) {
1480 arg = args[i];
1481 if (!dead_temps[arg] || mem_temps[arg]) {
1482 goto do_not_remove_call;
1485 tcg_set_nop(s, s->gen_opc_buf + op_index,
1486 args - 1, nb_args);
1487 } else {
1488 do_not_remove_call:
1490 /* output args are dead */
1491 dead_args = 0;
1492 sync_args = 0;
1493 for(i = 0; i < nb_oargs; i++) {
1494 arg = args[i];
1495 if (dead_temps[arg]) {
1496 dead_args |= (1 << i);
1498 if (mem_temps[arg]) {
1499 sync_args |= (1 << i);
1501 dead_temps[arg] = 1;
1502 mem_temps[arg] = 0;
1505 if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
1506 /* globals should be synced to memory */
1507 memset(mem_temps, 1, s->nb_globals);
1509 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
1510 TCG_CALL_NO_READ_GLOBALS))) {
1511 /* globals should go back to memory */
1512 memset(dead_temps, 1, s->nb_globals);
1515 /* input args are live */
1516 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1517 arg = args[i];
1518 if (arg != TCG_CALL_DUMMY_ARG) {
1519 if (dead_temps[arg]) {
1520 dead_args |= (1 << i);
1522 dead_temps[arg] = 0;
1525 s->op_dead_args[op_index] = dead_args;
1526 s->op_sync_args[op_index] = sync_args;
1528 args--;
1530 break;
1531 case INDEX_op_debug_insn_start:
1532 args -= def->nb_args;
1533 break;
1534 case INDEX_op_nopn:
1535 nb_args = args[-1];
1536 args -= nb_args;
1537 break;
1538 case INDEX_op_discard:
1539 args--;
1540 /* mark the temporary as dead */
1541 dead_temps[args[0]] = 1;
1542 mem_temps[args[0]] = 0;
1543 break;
1544 case INDEX_op_end:
1545 break;
1547 case INDEX_op_add2_i32:
1548 op_new = INDEX_op_add_i32;
1549 goto do_addsub2;
1550 case INDEX_op_sub2_i32:
1551 op_new = INDEX_op_sub_i32;
1552 goto do_addsub2;
1553 case INDEX_op_add2_i64:
1554 op_new = INDEX_op_add_i64;
1555 goto do_addsub2;
1556 case INDEX_op_sub2_i64:
1557 op_new = INDEX_op_sub_i64;
1558 do_addsub2:
1559 args -= 6;
1560 nb_iargs = 4;
1561 nb_oargs = 2;
1562 /* Test if the high part of the operation is dead, but not
1563 the low part. The result can be optimized to a simple
1564 add or sub. This happens often for an x86_64 guest when the
1565 cpu mode is set to 32 bit. */
1566 if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1567 if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1568 goto do_remove;
1570 /* Create the single operation plus nop. */
1571 s->gen_opc_buf[op_index] = op = op_new;
1572 args[1] = args[2];
1573 args[2] = args[4];
1574 assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
1575 tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 3);
1576 /* Fall through and mark the single-word operation live. */
1577 nb_iargs = 2;
1578 nb_oargs = 1;
1580 goto do_not_remove;
1582 case INDEX_op_mulu2_i32:
1583 op_new = INDEX_op_mul_i32;
1584 op_new2 = INDEX_op_muluh_i32;
1585 have_op_new2 = TCG_TARGET_HAS_muluh_i32;
1586 goto do_mul2;
1587 case INDEX_op_muls2_i32:
1588 op_new = INDEX_op_mul_i32;
1589 op_new2 = INDEX_op_mulsh_i32;
1590 have_op_new2 = TCG_TARGET_HAS_mulsh_i32;
1591 goto do_mul2;
1592 case INDEX_op_mulu2_i64:
1593 op_new = INDEX_op_mul_i64;
1594 op_new2 = INDEX_op_muluh_i64;
1595 have_op_new2 = TCG_TARGET_HAS_muluh_i64;
1596 goto do_mul2;
1597 case INDEX_op_muls2_i64:
1598 op_new = INDEX_op_mul_i64;
1599 op_new2 = INDEX_op_mulsh_i64;
1600 have_op_new2 = TCG_TARGET_HAS_mulsh_i64;
1601 goto do_mul2;
1602 do_mul2:
1603 args -= 4;
1604 nb_iargs = 2;
1605 nb_oargs = 2;
1606 if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1607 if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1608 /* Both parts of the operation are dead. */
1609 goto do_remove;
1611 /* The high part of the operation is dead; generate the low. */
1612 s->gen_opc_buf[op_index] = op = op_new;
1613 args[1] = args[2];
1614 args[2] = args[3];
1615 } else if (have_op_new2 && dead_temps[args[0]]
1616 && !mem_temps[args[0]]) {
1617 /* The low part of the operation is dead; generate the high. */
1618 s->gen_opc_buf[op_index] = op = op_new2;
1619 args[0] = args[1];
1620 args[1] = args[2];
1621 args[2] = args[3];
1622 } else {
1623 goto do_not_remove;
1625 assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
1626 tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 1);
1627 /* Mark the single-word operation live. */
1628 nb_oargs = 1;
1629 goto do_not_remove;
1631 default:
1632 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1633 args -= def->nb_args;
1634 nb_iargs = def->nb_iargs;
1635 nb_oargs = def->nb_oargs;
1637 /* Test if the operation can be removed because all
1638 its outputs are dead. We assume that nb_oargs == 0
1639 implies side effects */
1640 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1641 for(i = 0; i < nb_oargs; i++) {
1642 arg = args[i];
1643 if (!dead_temps[arg] || mem_temps[arg]) {
1644 goto do_not_remove;
1647 do_remove:
1648 tcg_set_nop(s, s->gen_opc_buf + op_index, args, def->nb_args);
1649 #ifdef CONFIG_PROFILER
1650 s->del_op_count++;
1651 #endif
1652 } else {
1653 do_not_remove:
1655 /* output args are dead */
1656 dead_args = 0;
1657 sync_args = 0;
1658 for(i = 0; i < nb_oargs; i++) {
1659 arg = args[i];
1660 if (dead_temps[arg]) {
1661 dead_args |= (1 << i);
1663 if (mem_temps[arg]) {
1664 sync_args |= (1 << i);
1666 dead_temps[arg] = 1;
1667 mem_temps[arg] = 0;
1670 /* if end of basic block, update */
1671 if (def->flags & TCG_OPF_BB_END) {
1672 tcg_la_bb_end(s, dead_temps, mem_temps);
1673 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1674 /* globals should be synced to memory */
1675 memset(mem_temps, 1, s->nb_globals);
1678 /* input args are live */
1679 for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1680 arg = args[i];
1681 if (dead_temps[arg]) {
1682 dead_args |= (1 << i);
1684 dead_temps[arg] = 0;
1686 s->op_dead_args[op_index] = dead_args;
1687 s->op_sync_args[op_index] = sync_args;
1689 break;
1691 op_index--;
1694 if (args != s->gen_opparam_buf) {
1695 tcg_abort();
1698 #else
1699 /* dummy liveness analysis */
1700 static void tcg_liveness_analysis(TCGContext *s)
1702 int nb_ops;
1703 nb_ops = s->gen_opc_ptr - s->gen_opc_buf;
1705 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1706 memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
1707 s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1708 memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t));
1710 #endif
1712 #ifndef NDEBUG
1713 static void dump_regs(TCGContext *s)
1715 TCGTemp *ts;
1716 int i;
1717 char buf[64];
1719 for(i = 0; i < s->nb_temps; i++) {
1720 ts = &s->temps[i];
1721 printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1722 switch(ts->val_type) {
1723 case TEMP_VAL_REG:
1724 printf("%s", tcg_target_reg_names[ts->reg]);
1725 break;
1726 case TEMP_VAL_MEM:
1727 printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
1728 break;
1729 case TEMP_VAL_CONST:
1730 printf("$0x%" TCG_PRIlx, ts->val);
1731 break;
1732 case TEMP_VAL_DEAD:
1733 printf("D");
1734 break;
1735 default:
1736 printf("???");
1737 break;
1739 printf("\n");
1742 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1743 if (s->reg_to_temp[i] >= 0) {
1744 printf("%s: %s\n",
1745 tcg_target_reg_names[i],
1746 tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
1751 static void check_regs(TCGContext *s)
1753 int reg, k;
1754 TCGTemp *ts;
1755 char buf[64];
1757 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1758 k = s->reg_to_temp[reg];
1759 if (k >= 0) {
1760 ts = &s->temps[k];
1761 if (ts->val_type != TEMP_VAL_REG ||
1762 ts->reg != reg) {
1763 printf("Inconsistency for register %s:\n",
1764 tcg_target_reg_names[reg]);
1765 goto fail;
1769 for(k = 0; k < s->nb_temps; k++) {
1770 ts = &s->temps[k];
1771 if (ts->val_type == TEMP_VAL_REG &&
1772 !ts->fixed_reg &&
1773 s->reg_to_temp[ts->reg] != k) {
1774 printf("Inconsistency for temp %s:\n",
1775 tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
1776 fail:
1777 printf("reg state:\n");
1778 dump_regs(s);
1779 tcg_abort();
1783 #endif
1785 static void temp_allocate_frame(TCGContext *s, int temp)
1787 TCGTemp *ts;
1788 ts = &s->temps[temp];
1789 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
1790 /* The Sparc64 stack is accessed with an offset of 2047 */
1791 s->current_frame_offset = (s->current_frame_offset +
1792 (tcg_target_long)sizeof(tcg_target_long) - 1) &
1793 ~(sizeof(tcg_target_long) - 1);
1794 #endif
1795 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
1796 s->frame_end) {
1797 tcg_abort();
1799 ts->mem_offset = s->current_frame_offset;
1800 ts->mem_reg = s->frame_reg;
1801 ts->mem_allocated = 1;
1802 s->current_frame_offset += sizeof(tcg_target_long);
1805 /* sync register 'reg' by saving it to the corresponding temporary */
1806 static inline void tcg_reg_sync(TCGContext *s, int reg)
1808 TCGTemp *ts;
1809 int temp;
1811 temp = s->reg_to_temp[reg];
1812 ts = &s->temps[temp];
1813 assert(ts->val_type == TEMP_VAL_REG);
1814 if (!ts->mem_coherent && !ts->fixed_reg) {
1815 if (!ts->mem_allocated) {
1816 temp_allocate_frame(s, temp);
1818 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1820 ts->mem_coherent = 1;
1823 /* free register 'reg' by spilling the corresponding temporary if necessary */
1824 static void tcg_reg_free(TCGContext *s, int reg)
1826 int temp;
1828 temp = s->reg_to_temp[reg];
1829 if (temp != -1) {
1830 tcg_reg_sync(s, reg);
1831 s->temps[temp].val_type = TEMP_VAL_MEM;
1832 s->reg_to_temp[reg] = -1;
1836 /* Allocate a register belonging to reg1 & ~reg2 */
1837 static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
1839 int i, reg;
1840 TCGRegSet reg_ct;
1842 tcg_regset_andnot(reg_ct, reg1, reg2);
1844 /* first try free registers */
1845 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1846 reg = tcg_target_reg_alloc_order[i];
1847 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
1848 return reg;
1851 /* XXX: do better spill choice */
1852 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1853 reg = tcg_target_reg_alloc_order[i];
1854 if (tcg_regset_test_reg(reg_ct, reg)) {
1855 tcg_reg_free(s, reg);
1856 return reg;
1860 tcg_abort();
1863 /* mark a temporary as dead. */
1864 static inline void temp_dead(TCGContext *s, int temp)
1866 TCGTemp *ts;
1868 ts = &s->temps[temp];
1869 if (!ts->fixed_reg) {
1870 if (ts->val_type == TEMP_VAL_REG) {
1871 s->reg_to_temp[ts->reg] = -1;
1873 if (temp < s->nb_globals || ts->temp_local) {
1874 ts->val_type = TEMP_VAL_MEM;
1875 } else {
1876 ts->val_type = TEMP_VAL_DEAD;
1881 /* sync a temporary to memory. 'allocated_regs' is used in case a
1882 temporary register needs to be allocated to store a constant. */
1883 static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs)
1885 TCGTemp *ts;
1887 ts = &s->temps[temp];
1888 if (!ts->fixed_reg) {
1889 switch(ts->val_type) {
1890 case TEMP_VAL_CONST:
1891 ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1892 allocated_regs);
1893 ts->val_type = TEMP_VAL_REG;
1894 s->reg_to_temp[ts->reg] = temp;
1895 ts->mem_coherent = 0;
1896 tcg_out_movi(s, ts->type, ts->reg, ts->val);
1897 /* fall through */
1898 case TEMP_VAL_REG:
1899 tcg_reg_sync(s, ts->reg);
1900 break;
1901 case TEMP_VAL_DEAD:
1902 case TEMP_VAL_MEM:
1903 break;
1904 default:
1905 tcg_abort();
1910 /* save a temporary to memory. 'allocated_regs' is used in case a
1911 temporary register needs to be allocated to store a constant. */
1912 static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
1914 #ifdef USE_LIVENESS_ANALYSIS
1915 /* The liveness analysis already ensures that globals are back
1916 in memory. Keep an assert for safety. */
1917 assert(s->temps[temp].val_type == TEMP_VAL_MEM || s->temps[temp].fixed_reg);
1918 #else
1919 temp_sync(s, temp, allocated_regs);
1920 temp_dead(s, temp);
1921 #endif
1924 /* save globals to their canonical location and assume they can be
1925 modified by the following code. 'allocated_regs' is used in case a
1926 temporary register needs to be allocated to store a constant. */
1927 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
1929 int i;
1931 for(i = 0; i < s->nb_globals; i++) {
1932 temp_save(s, i, allocated_regs);
1936 /* sync globals to their canonical location and assume they can be
1937 read by the following code. 'allocated_regs' is used in case a
1938 temporary register needs to be allocated to store a constant. */
1939 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
1941 int i;
1943 for (i = 0; i < s->nb_globals; i++) {
1944 #ifdef USE_LIVENESS_ANALYSIS
1945 assert(s->temps[i].val_type != TEMP_VAL_REG || s->temps[i].fixed_reg ||
1946 s->temps[i].mem_coherent);
1947 #else
1948 temp_sync(s, i, allocated_regs);
1949 #endif
1953 /* at the end of a basic block, we assume all temporaries are dead and
1954 all globals are stored at their canonical location. */
1955 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
1957 TCGTemp *ts;
1958 int i;
1960 for(i = s->nb_globals; i < s->nb_temps; i++) {
1961 ts = &s->temps[i];
1962 if (ts->temp_local) {
1963 temp_save(s, i, allocated_regs);
1964 } else {
1965 #ifdef USE_LIVENESS_ANALYSIS
1966 /* The liveness analysis already ensures that temps are dead.
1967 Keep an assert for safety. */
1968 assert(ts->val_type == TEMP_VAL_DEAD);
1969 #else
1970 temp_dead(s, i);
1971 #endif
1975 save_globals(s, allocated_regs);
1978 #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1979 #define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1)
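/* Editor's note (not part of the original file): a worked example of these
   bitmasks.  If liveness computed dead_args = 0x5 and sync_args = 0x1 for
   an op, then argument 0 is dead but must first be synced back to its
   canonical memory slot, and argument 2 is dead with no sync needed:
       IS_DEAD_ARG(0) -> 1    NEED_SYNC_ARG(0) -> 1
       IS_DEAD_ARG(1) -> 0
       IS_DEAD_ARG(2) -> 1    NEED_SYNC_ARG(2) -> 0                    */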
1981 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
1982 uint16_t dead_args, uint8_t sync_args)
1984 TCGTemp *ots;
1985 tcg_target_ulong val;
1987 ots = &s->temps[args[0]];
1988 val = args[1];
1990 if (ots->fixed_reg) {
1991 /* for fixed registers, we do not do any constant
1992 propagation */
1993 tcg_out_movi(s, ots->type, ots->reg, val);
1994 } else {
1995 /* The movi is not explicitly generated here */
1996 if (ots->val_type == TEMP_VAL_REG)
1997 s->reg_to_temp[ots->reg] = -1;
1998 ots->val_type = TEMP_VAL_CONST;
1999 ots->val = val;
2001 if (NEED_SYNC_ARG(0)) {
2002 temp_sync(s, args[0], s->reserved_regs);
2004 if (IS_DEAD_ARG(0)) {
2005 temp_dead(s, args[0]);
2009 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
2010 const TCGArg *args, uint16_t dead_args,
2011 uint8_t sync_args)
2013 TCGRegSet allocated_regs;
2014 TCGTemp *ts, *ots;
2015 const TCGArgConstraint *arg_ct, *oarg_ct;
2017 tcg_regset_set(allocated_regs, s->reserved_regs);
2018 ots = &s->temps[args[0]];
2019 ts = &s->temps[args[1]];
2020 oarg_ct = &def->args_ct[0];
2021 arg_ct = &def->args_ct[1];
2023 /* If the source value is not in a register, and we're going to be
2024 forced to have it in a register in order to perform the copy,
2025 then copy the SOURCE value into its own register first. That way
2026 we don't have to reload SOURCE the next time it is used. */
2027 if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
2028 || ts->val_type == TEMP_VAL_MEM) {
2029 ts->reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2030 if (ts->val_type == TEMP_VAL_MEM) {
2031 tcg_out_ld(s, ts->type, ts->reg, ts->mem_reg, ts->mem_offset);
2032 ts->mem_coherent = 1;
2033 } else if (ts->val_type == TEMP_VAL_CONST) {
2034 tcg_out_movi(s, ts->type, ts->reg, ts->val);
2036 s->reg_to_temp[ts->reg] = args[1];
2037 ts->val_type = TEMP_VAL_REG;
2040 if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
2041 /* mov to a non-saved dead register makes no sense (even with
2042 liveness analysis disabled). */
2043 assert(NEED_SYNC_ARG(0));
2044 /* The code above should have moved the temp to a register. */
2045 assert(ts->val_type == TEMP_VAL_REG);
2046 if (!ots->mem_allocated) {
2047 temp_allocate_frame(s, args[0]);
2049 tcg_out_st(s, ots->type, ts->reg, ots->mem_reg, ots->mem_offset);
2050 if (IS_DEAD_ARG(1)) {
2051 temp_dead(s, args[1]);
2053 temp_dead(s, args[0]);
2054 } else if (ts->val_type == TEMP_VAL_CONST) {
2055 /* propagate constant */
2056 if (ots->val_type == TEMP_VAL_REG) {
2057 s->reg_to_temp[ots->reg] = -1;
2059 ots->val_type = TEMP_VAL_CONST;
2060 ots->val = ts->val;
2061 } else {
2062 /* The code in the first if block should have moved the
2063 temp to a register. */
2064 assert(ts->val_type == TEMP_VAL_REG);
2065 if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
2066 /* the mov can be suppressed */
2067 if (ots->val_type == TEMP_VAL_REG) {
2068 s->reg_to_temp[ots->reg] = -1;
2070 ots->reg = ts->reg;
2071 temp_dead(s, args[1]);
2072 } else {
2073 if (ots->val_type != TEMP_VAL_REG) {
2074 /* When allocating a new register, make sure to not spill the
2075 input one. */
2076 tcg_regset_set_reg(allocated_regs, ts->reg);
2077 ots->reg = tcg_reg_alloc(s, oarg_ct->u.regs, allocated_regs);
2079 tcg_out_mov(s, ots->type, ots->reg, ts->reg);
2081 ots->val_type = TEMP_VAL_REG;
2082 ots->mem_coherent = 0;
2083 s->reg_to_temp[ots->reg] = args[0];
2084 if (NEED_SYNC_ARG(0)) {
2085 tcg_reg_sync(s, ots->reg);
2090 static void tcg_reg_alloc_op(TCGContext *s,
2091 const TCGOpDef *def, TCGOpcode opc,
2092 const TCGArg *args, uint16_t dead_args,
2093 uint8_t sync_args)
2095 TCGRegSet allocated_regs;
2096 int i, k, nb_iargs, nb_oargs, reg;
2097 TCGArg arg;
2098 const TCGArgConstraint *arg_ct;
2099 TCGTemp *ts;
2100 TCGArg new_args[TCG_MAX_OP_ARGS];
2101 int const_args[TCG_MAX_OP_ARGS];
2103 nb_oargs = def->nb_oargs;
2104 nb_iargs = def->nb_iargs;
2106 /* copy constants */
2107 memcpy(new_args + nb_oargs + nb_iargs,
2108 args + nb_oargs + nb_iargs,
2109 sizeof(TCGArg) * def->nb_cargs);
2111 /* satisfy input constraints */
2112 tcg_regset_set(allocated_regs, s->reserved_regs);
2113 for(k = 0; k < nb_iargs; k++) {
2114 i = def->sorted_args[nb_oargs + k];
2115 arg = args[i];
2116 arg_ct = &def->args_ct[i];
2117 ts = &s->temps[arg];
2118 if (ts->val_type == TEMP_VAL_MEM) {
2119 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2120 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2121 ts->val_type = TEMP_VAL_REG;
2122 ts->reg = reg;
2123 ts->mem_coherent = 1;
2124 s->reg_to_temp[reg] = arg;
2125 } else if (ts->val_type == TEMP_VAL_CONST) {
2126 if (tcg_target_const_match(ts->val, arg_ct)) {
2127 /* constant is OK for instruction */
2128 const_args[i] = 1;
2129 new_args[i] = ts->val;
2130 goto iarg_end;
2131 } else {
2132 /* need to move to a register */
2133 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2134 tcg_out_movi(s, ts->type, reg, ts->val);
2135 ts->val_type = TEMP_VAL_REG;
2136 ts->reg = reg;
2137 ts->mem_coherent = 0;
2138 s->reg_to_temp[reg] = arg;
2139 }
2140 }
2141 assert(ts->val_type == TEMP_VAL_REG);
2142 if (arg_ct->ct & TCG_CT_IALIAS) {
2143 if (ts->fixed_reg) {
2144 /* if fixed register, we must allocate a new register
2145 if the alias is not the same register */
2146 if (arg != args[arg_ct->alias_index])
2147 goto allocate_in_reg;
2148 } else {
2149 /* if the input is aliased to an output and if it is
2150 not dead after the instruction, we must allocate
2151 a new register and move it */
2152 if (!IS_DEAD_ARG(i)) {
2153 goto allocate_in_reg;
2154 }
2155 }
2156 }
2157 reg = ts->reg;
2158 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2159 /* nothing to do: the constraint is satisfied */
2160 } else {
2161 allocate_in_reg:
2162 /* allocate a new register matching the constraint
2163 and move the temporary register into it */
2164 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2165 tcg_out_mov(s, ts->type, reg, ts->reg);
2166 }
2167 new_args[i] = reg;
2168 const_args[i] = 0;
2169 tcg_regset_set_reg(allocated_regs, reg);
2170 iarg_end: ;
2171 }
2173 /* mark dead temporaries and free the associated registers */
2174 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2175 if (IS_DEAD_ARG(i)) {
2176 temp_dead(s, args[i]);
2177 }
2178 }
2180 if (def->flags & TCG_OPF_BB_END) {
2181 tcg_reg_alloc_bb_end(s, allocated_regs);
2182 } else {
2183 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2184 /* XXX: permit a generic clobber register list? */
2185 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2186 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2187 tcg_reg_free(s, reg);
2188 }
2189 }
2190 }
2191 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2192 /* sync globals if the op has side effects and might trigger
2193 an exception. */
2194 sync_globals(s, allocated_regs);
2195 }
2197 /* satisfy the output constraints */
2198 tcg_regset_set(allocated_regs, s->reserved_regs);
2199 for(k = 0; k < nb_oargs; k++) {
2200 i = def->sorted_args[k];
2201 arg = args[i];
2202 arg_ct = &def->args_ct[i];
2203 ts = &s->temps[arg];
2204 if (arg_ct->ct & TCG_CT_ALIAS) {
2205 reg = new_args[arg_ct->alias_index];
2206 } else {
2207 /* if fixed register, we try to use it */
2208 reg = ts->reg;
2209 if (ts->fixed_reg &&
2210 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2211 goto oarg_end;
2212 }
2213 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2214 }
2215 tcg_regset_set_reg(allocated_regs, reg);
2216 /* if a fixed register is used, then a move will be done afterwards */
2217 if (!ts->fixed_reg) {
2218 if (ts->val_type == TEMP_VAL_REG) {
2219 s->reg_to_temp[ts->reg] = -1;
2220 }
2221 ts->val_type = TEMP_VAL_REG;
2222 ts->reg = reg;
2223 /* temp value is modified, so the value kept in memory is
2224 potentially not the same */
2225 ts->mem_coherent = 0;
2226 s->reg_to_temp[reg] = arg;
2227 }
2228 oarg_end:
2229 new_args[i] = reg;
2230 }
2231 }
2233 /* emit instruction */
2234 tcg_out_op(s, opc, new_args, const_args);
2236 /* move the outputs in the correct register if needed */
2237 for(i = 0; i < nb_oargs; i++) {
2238 ts = &s->temps[args[i]];
2239 reg = new_args[i];
2240 if (ts->fixed_reg && ts->reg != reg) {
2241 tcg_out_mov(s, ts->type, ts->reg, reg);
2242 }
2243 if (NEED_SYNC_ARG(i)) {
2244 tcg_reg_sync(s, reg);
2245 }
2246 if (IS_DEAD_ARG(i)) {
2247 temp_dead(s, args[i]);
2248 }
2249 }
2250 }
2252 #ifdef TCG_TARGET_STACK_GROWSUP
2253 #define STACK_DIR(x) (-(x))
2254 #else
2255 #define STACK_DIR(x) (x)
2256 #endif
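/* Example: on the usual downward-growing stack STACK_DIR(x) is just x,
   so argument slots are filled at increasing offsets; on a
   TCG_TARGET_STACK_GROWSUP host STACK_DIR(8) == -8 and the offsets
   advance the other way. */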
2258 static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
2259 TCGOpcode opc, const TCGArg *args,
2260 uint16_t dead_args, uint8_t sync_args)
2261 {
2262 int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
2263 TCGArg arg, func_arg;
2264 TCGTemp *ts;
2265 intptr_t stack_offset;
2266 size_t call_stack_size;
2267 uintptr_t func_addr;
2268 int const_func_arg, allocate_args;
2269 TCGRegSet allocated_regs;
2270 const TCGArgConstraint *arg_ct;
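/* The first TCGArg of a call op packs both argument counts:
   nb_oargs in the high 16 bits and nb_iargs in the low 16 bits,
   e.g. (2 << 16) | 3 for two outputs and three inputs. The last
   input is the function pointer itself, so only nb_params =
   nb_iargs - 1 values are actually passed as parameters. */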
2272 arg = *args++;
2274 nb_oargs = arg >> 16;
2275 nb_iargs = arg & 0xffff;
2276 nb_params = nb_iargs - 1;
2278 flags = args[nb_oargs + nb_iargs];
2280 nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2281 if (nb_regs > nb_params)
2282 nb_regs = nb_params;
2284 /* assign stack slots first */
2285 call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
2286 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
2287 ~(TCG_TARGET_STACK_ALIGN - 1);
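/* Worked example: three stack-passed parameters on a 64-bit host need
   3 * 8 = 24 bytes; assuming TCG_TARGET_STACK_ALIGN is 16, the
   rounding above grows that to a 32-byte frame. */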
2288 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
2289 if (allocate_args) {
2290 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
2291 preallocate call stack */
2292 tcg_abort();
2293 }
2295 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
2296 for(i = nb_regs; i < nb_params; i++) {
2297 arg = args[nb_oargs + i];
2298 #ifdef TCG_TARGET_STACK_GROWSUP
2299 stack_offset -= sizeof(tcg_target_long);
2300 #endif
2301 if (arg != TCG_CALL_DUMMY_ARG) {
2302 ts = &s->temps[arg];
2303 if (ts->val_type == TEMP_VAL_REG) {
2304 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
2305 } else if (ts->val_type == TEMP_VAL_MEM) {
2306 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2307 s->reserved_regs);
2308 /* XXX: not correct if reading values from the stack */
2309 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2310 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2311 } else if (ts->val_type == TEMP_VAL_CONST) {
2312 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2313 s->reserved_regs);
2314 /* XXX: sign extend may be needed on some targets */
2315 tcg_out_movi(s, ts->type, reg, ts->val);
2316 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2317 } else {
2318 tcg_abort();
2319 }
2320 }
2321 #ifndef TCG_TARGET_STACK_GROWSUP
2322 stack_offset += sizeof(tcg_target_long);
2323 #endif
2324 }
2326 /* assign input registers */
2327 tcg_regset_set(allocated_regs, s->reserved_regs);
2328 for(i = 0; i < nb_regs; i++) {
2329 arg = args[nb_oargs + i];
2330 if (arg != TCG_CALL_DUMMY_ARG) {
2331 ts = &s->temps[arg];
2332 reg = tcg_target_call_iarg_regs[i];
2333 tcg_reg_free(s, reg);
2334 if (ts->val_type == TEMP_VAL_REG) {
2335 if (ts->reg != reg) {
2336 tcg_out_mov(s, ts->type, reg, ts->reg);
2337 }
2338 } else if (ts->val_type == TEMP_VAL_MEM) {
2339 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2340 } else if (ts->val_type == TEMP_VAL_CONST) {
2341 /* XXX: sign extend? */
2342 tcg_out_movi(s, ts->type, reg, ts->val);
2343 } else {
2344 tcg_abort();
2345 }
2346 tcg_regset_set_reg(allocated_regs, reg);
2347 }
2348 }
2350 /* assign function address */
2351 func_arg = args[nb_oargs + nb_iargs - 1];
2352 arg_ct = &def->args_ct[0];
2353 ts = &s->temps[func_arg];
2354 func_addr = ts->val;
2355 const_func_arg = 0;
2356 if (ts->val_type == TEMP_VAL_MEM) {
2357 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2358 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2359 func_arg = reg;
2360 tcg_regset_set_reg(allocated_regs, reg);
2361 } else if (ts->val_type == TEMP_VAL_REG) {
2362 reg = ts->reg;
2363 if (!tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2364 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2365 tcg_out_mov(s, ts->type, reg, ts->reg);
2366 }
2367 func_arg = reg;
2368 tcg_regset_set_reg(allocated_regs, reg);
2369 } else if (ts->val_type == TEMP_VAL_CONST) {
2370 if (tcg_target_const_match(func_addr, arg_ct)) {
2371 const_func_arg = 1;
2372 func_arg = func_addr;
2373 } else {
2374 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2375 tcg_out_movi(s, ts->type, reg, func_addr);
2376 func_arg = reg;
2377 tcg_regset_set_reg(allocated_regs, reg);
2378 }
2379 } else {
2380 tcg_abort();
2381 }
2384 /* mark dead temporaries and free the associated registers */
2385 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2386 if (IS_DEAD_ARG(i)) {
2387 temp_dead(s, args[i]);
2388 }
2389 }
2391 /* clobber call registers */
2392 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2393 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2394 tcg_reg_free(s, reg);
2395 }
2396 }
2398 /* Save globals if they might be written by the helper, sync them if
2399 they might be read. */
2400 if (flags & TCG_CALL_NO_READ_GLOBALS) {
2401 /* Nothing to do */
2402 } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
2403 sync_globals(s, allocated_regs);
2404 } else {
2405 save_globals(s, allocated_regs);
2406 }
2408 tcg_out_op(s, opc, &func_arg, &const_func_arg);
2410 /* assign output registers and emit moves if needed */
2411 for(i = 0; i < nb_oargs; i++) {
2412 arg = args[i];
2413 ts = &s->temps[arg];
2414 reg = tcg_target_call_oarg_regs[i];
2415 assert(s->reg_to_temp[reg] == -1);
2416 if (ts->fixed_reg) {
2417 if (ts->reg != reg) {
2418 tcg_out_mov(s, ts->type, ts->reg, reg);
2419 }
2420 } else {
2421 if (ts->val_type == TEMP_VAL_REG) {
2422 s->reg_to_temp[ts->reg] = -1;
2423 }
2424 ts->val_type = TEMP_VAL_REG;
2425 ts->reg = reg;
2426 ts->mem_coherent = 0;
2427 s->reg_to_temp[reg] = arg;
2428 if (NEED_SYNC_ARG(i)) {
2429 tcg_reg_sync(s, reg);
2430 }
2431 if (IS_DEAD_ARG(i)) {
2432 temp_dead(s, args[i]);
2433 }
2434 }
2435 }
2437 return nb_iargs + nb_oargs + def->nb_cargs + 1;
2438 }
2440 #ifdef CONFIG_PROFILER
2442 static int64_t tcg_table_op_count[NB_OPS];
2444 static void dump_op_count(void)
2445 {
2446 int i;
2447 FILE *f;
2448 f = fopen("/tmp/op.log", "w");
2449 for(i = INDEX_op_end; i < NB_OPS; i++) {
2450 fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, tcg_table_op_count[i]);
2451 }
2452 fclose(f);
2453 }
2454 #endif
2457 static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
2458 long search_pc)
2459 {
2460 TCGOpcode opc;
2461 int op_index;
2462 const TCGOpDef *def;
2463 const TCGArg *args;
2465 #ifdef DEBUG_DISAS
2466 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2467 qemu_log("OP:\n");
2468 tcg_dump_ops(s);
2469 qemu_log("\n");
2470 }
2471 #endif
2473 #ifdef CONFIG_PROFILER
2474 s->opt_time -= profile_getclock();
2475 #endif
2477 #ifdef USE_TCG_OPTIMIZATIONS
2478 s->gen_opparam_ptr =
2479 tcg_optimize(s, s->gen_opc_ptr, s->gen_opparam_buf, tcg_op_defs);
2480 #endif
2482 #ifdef CONFIG_PROFILER
2483 s->opt_time += profile_getclock();
2484 s->la_time -= profile_getclock();
2485 #endif
2487 tcg_liveness_analysis(s);
2489 #ifdef CONFIG_PROFILER
2490 s->la_time += profile_getclock();
2491 #endif
2493 #ifdef DEBUG_DISAS
2494 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
2495 qemu_log("OP after optimization and liveness analysis:\n");
2496 tcg_dump_ops(s);
2497 qemu_log("\n");
2498 }
2499 #endif
2501 tcg_reg_alloc_start(s);
2503 s->code_buf = gen_code_buf;
2504 s->code_ptr = gen_code_buf;
2506 tcg_out_tb_init(s);
2508 args = s->gen_opparam_buf;
2509 op_index = 0;
2511 for(;;) {
2512 opc = s->gen_opc_buf[op_index];
2513 #ifdef CONFIG_PROFILER
2514 tcg_table_op_count[opc]++;
2515 #endif
2516 def = &tcg_op_defs[opc];
2517 #if 0
2518 printf("%s: %d %d %d\n", def->name,
2519 def->nb_oargs, def->nb_iargs, def->nb_cargs);
2520 // dump_regs(s);
2521 #endif
2522 switch(opc) {
2523 case INDEX_op_mov_i32:
2524 case INDEX_op_mov_i64:
2525 tcg_reg_alloc_mov(s, def, args, s->op_dead_args[op_index],
2526 s->op_sync_args[op_index]);
2527 break;
2528 case INDEX_op_movi_i32:
2529 case INDEX_op_movi_i64:
2530 tcg_reg_alloc_movi(s, args, s->op_dead_args[op_index],
2531 s->op_sync_args[op_index]);
2532 break;
2533 case INDEX_op_debug_insn_start:
2534 /* debug instruction */
2535 break;
2536 case INDEX_op_nop:
2537 case INDEX_op_nop1:
2538 case INDEX_op_nop2:
2539 case INDEX_op_nop3:
2540 break;
2541 case INDEX_op_nopn:
2542 args += args[0];
2543 goto next;
2544 case INDEX_op_discard:
2545 temp_dead(s, args[0]);
2546 break;
2547 case INDEX_op_set_label:
2548 tcg_reg_alloc_bb_end(s, s->reserved_regs);
2549 tcg_out_label(s, args[0], s->code_ptr);
2550 break;
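/* Calls are variable-length ops: tcg_reg_alloc_call() returns the
   number of TCGArg words it consumed (the leading count word plus
   outputs, inputs and constant args), so the case below advances
   'args' by its return value instead of by def->nb_args. */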
2551 case INDEX_op_call:
2552 args += tcg_reg_alloc_call(s, def, opc, args,
2553 s->op_dead_args[op_index],
2554 s->op_sync_args[op_index]);
2555 goto next;
2556 case INDEX_op_end:
2557 goto the_end;
2558 default:
2559 /* Sanity check that we've not introduced any unhandled opcodes. */
2560 if (def->flags & TCG_OPF_NOT_PRESENT) {
2561 tcg_abort();
2562 }
2563 /* Note: it would be much faster to have specialized
2564 register allocator functions for some common argument
2565 patterns. */
2566 tcg_reg_alloc_op(s, def, opc, args, s->op_dead_args[op_index],
2567 s->op_sync_args[op_index]);
2568 break;
2569 }
2570 args += def->nb_args;
2571 next:
2572 if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
2573 return op_index;
2574 }
2575 op_index++;
2576 #ifndef NDEBUG
2577 check_regs(s);
2578 #endif
2579 }
2580 the_end:
2581 /* Generate TB finalization at the end of block */
2582 tcg_out_tb_finalize(s);
2583 return -1;
2584 }
2586 int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf)
2587 {
2588 #ifdef CONFIG_PROFILER
2589 {
2590 int n;
2591 n = (s->gen_opc_ptr - s->gen_opc_buf);
2592 s->op_count += n;
2593 if (n > s->op_count_max)
2594 s->op_count_max = n;
2596 s->temp_count += s->nb_temps;
2597 if (s->nb_temps > s->temp_count_max)
2598 s->temp_count_max = s->nb_temps;
2599 }
2600 #endif
2602 tcg_gen_code_common(s, gen_code_buf, -1);
2604 /* flush instruction cache */
2605 flush_icache_range((uintptr_t)gen_code_buf, (uintptr_t)s->code_ptr);
2607 return s->code_ptr - gen_code_buf;
2608 }
2610 /* Return the index of the first micro operation whose generated code
2611 extends past 'offset' bytes from the start of the TB, i.e. the op that
2612 covers that host pc. The contents of gen_code_buf must not be changed,
2613 though writing the same values is OK. Return -1 if not found. */
2614 int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
2615 {
2616 return tcg_gen_code_common(s, gen_code_buf, offset);
2617 }
2619 #ifdef CONFIG_PROFILER
2620 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2621 {
2622 TCGContext *s = &tcg_ctx;
2623 int64_t tot;
2625 tot = s->interm_time + s->code_time;
2626 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2627 tot, tot / 2.4e9);
2628 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2629 s->tb_count,
2630 s->tb_count1 - s->tb_count,
2631 s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0);
2632 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2633 s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max);
2634 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2635 s->tb_count ?
2636 (double)s->del_op_count / s->tb_count : 0);
2637 cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
2638 s->tb_count ?
2639 (double)s->temp_count / s->tb_count : 0,
2640 s->temp_count_max);
2642 cpu_fprintf(f, "cycles/op %0.1f\n",
2643 s->op_count ? (double)tot / s->op_count : 0);
2644 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2645 s->code_in_len ? (double)tot / s->code_in_len : 0);
2646 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2647 s->code_out_len ? (double)tot / s->code_out_len : 0);
2648 if (tot == 0)
2649 tot = 1;
2650 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2651 (double)s->interm_time / tot * 100.0);
2652 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2653 (double)s->code_time / tot * 100.0);
2654 cpu_fprintf(f, "optim./code time %0.1f%%\n",
2655 (double)s->opt_time / (s->code_time ? s->code_time : 1)
2656 * 100.0);
2657 cpu_fprintf(f, "liveness/code time %0.1f%%\n",
2658 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2659 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2660 s->restore_count);
2661 cpu_fprintf(f, " avg cycles %0.1f\n",
2662 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2664 dump_op_count();
2665 }
2666 #else
2667 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2668 {
2669 cpu_fprintf(f, "[TCG profiler not compiled]\n");
2670 }
2671 #endif
2673 #ifdef ELF_HOST_MACHINE
2674 /* In order to use this feature, the backend needs to do three things:
2676 (1) Define ELF_HOST_MACHINE to indicate both what value to
2677 put into the ELF image and to indicate support for the feature.
2679 (2) Define tcg_register_jit. This should create a buffer containing
2680 the contents of a .debug_frame section that describes the post-
2681 prologue unwind info for the tcg machine.
2683 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2684 */
2686 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
2687 typedef enum {
2688 JIT_NOACTION = 0,
2689 JIT_REGISTER_FN,
2690 JIT_UNREGISTER_FN
2691 } jit_actions_t;
2693 struct jit_code_entry {
2694 struct jit_code_entry *next_entry;
2695 struct jit_code_entry *prev_entry;
2696 const void *symfile_addr;
2697 uint64_t symfile_size;
2698 };
2700 struct jit_descriptor {
2701 uint32_t version;
2702 uint32_t action_flag;
2703 struct jit_code_entry *relevant_entry;
2704 struct jit_code_entry *first_entry;
2705 };
2707 void __jit_debug_register_code(void) __attribute__((noinline));
2708 void __jit_debug_register_code(void)
2709 {
2710 asm("");
2713 /* Must statically initialize the version, because GDB may check
2714 the version before we can set it. */
2715 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
2717 /* End GDB interface. */
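/* GDB plants a breakpoint inside __jit_debug_register_code (the asm("")
   above keeps the empty, noinline function from being optimized away);
   each time it fires, GDB re-reads __jit_debug_descriptor and loads the
   ELF image named by relevant_entry. A backend's tcg_register_jit()
   then just fills in its FDE and forwards here; as a sketch (modeled on
   the i386 backend, where 'debug_frame' is the backend's own static
   CIE+FDE container, not a type defined in this file):

       void tcg_register_jit(void *buf, size_t buf_size)
       {
           debug_frame.fde.func_start = (uintptr_t)buf;
           debug_frame.fde.func_len = buf_size;
           tcg_register_jit_int(buf, buf_size,
                                &debug_frame, sizeof(debug_frame));
       }
*/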
2719 static int find_string(const char *strtab, const char *str)
2720 {
2721 const char *p = strtab + 1;
2723 while (1) {
2724 if (strcmp(p, str) == 0) {
2725 return p - strtab;
2726 }
2727 p += strlen(p) + 1;
2728 }
2729 }
2731 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2732 void *debug_frame, size_t debug_frame_size)
2733 {
2734 struct __attribute__((packed)) DebugInfo {
2735 uint32_t len;
2736 uint16_t version;
2737 uint32_t abbrev;
2738 uint8_t ptr_size;
2739 uint8_t cu_die;
2740 uint16_t cu_lang;
2741 uintptr_t cu_low_pc;
2742 uintptr_t cu_high_pc;
2743 uint8_t fn_die;
2744 char fn_name[16];
2745 uintptr_t fn_low_pc;
2746 uintptr_t fn_high_pc;
2747 uint8_t cu_eoc;
2748 };
2750 struct ElfImage {
2751 ElfW(Ehdr) ehdr;
2752 ElfW(Phdr) phdr;
2753 ElfW(Shdr) shdr[7];
2754 ElfW(Sym) sym[2];
2755 struct DebugInfo di;
2756 uint8_t da[24];
2757 char str[80];
2758 };
2760 struct ElfImage *img;
2762 static const struct ElfImage img_template = {
2763 .ehdr = {
2764 .e_ident[EI_MAG0] = ELFMAG0,
2765 .e_ident[EI_MAG1] = ELFMAG1,
2766 .e_ident[EI_MAG2] = ELFMAG2,
2767 .e_ident[EI_MAG3] = ELFMAG3,
2768 .e_ident[EI_CLASS] = ELF_CLASS,
2769 .e_ident[EI_DATA] = ELF_DATA,
2770 .e_ident[EI_VERSION] = EV_CURRENT,
2771 .e_type = ET_EXEC,
2772 .e_machine = ELF_HOST_MACHINE,
2773 .e_version = EV_CURRENT,
2774 .e_phoff = offsetof(struct ElfImage, phdr),
2775 .e_shoff = offsetof(struct ElfImage, shdr),
2776 .e_ehsize = sizeof(ElfW(Ehdr)),
2777 .e_phentsize = sizeof(ElfW(Phdr)),
2778 .e_phnum = 1,
2779 .e_shentsize = sizeof(ElfW(Shdr)),
2780 .e_shnum = ARRAY_SIZE(img->shdr),
2781 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
2782 #ifdef ELF_HOST_FLAGS
2783 .e_flags = ELF_HOST_FLAGS,
2784 #endif
2785 #ifdef ELF_OSABI
2786 .e_ident[EI_OSABI] = ELF_OSABI,
2787 #endif
2788 },
2789 .phdr = {
2790 .p_type = PT_LOAD,
2791 .p_flags = PF_X,
2792 },
2793 .shdr = {
2794 [0] = { .sh_type = SHT_NULL },
2795 /* Trick: The contents of code_gen_buffer are not present in
2796 this fake ELF file; that got allocated elsewhere. Therefore
2797 we mark .text as SHT_NOBITS (similar to .bss) so that readers
2798 will not look for contents. We can record any address. */
2799 [1] = { /* .text */
2800 .sh_type = SHT_NOBITS,
2801 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
2802 },
2803 [2] = { /* .debug_info */
2804 .sh_type = SHT_PROGBITS,
2805 .sh_offset = offsetof(struct ElfImage, di),
2806 .sh_size = sizeof(struct DebugInfo),
2807 },
2808 [3] = { /* .debug_abbrev */
2809 .sh_type = SHT_PROGBITS,
2810 .sh_offset = offsetof(struct ElfImage, da),
2811 .sh_size = sizeof(img->da),
2812 },
2813 [4] = { /* .debug_frame */
2814 .sh_type = SHT_PROGBITS,
2815 .sh_offset = sizeof(struct ElfImage),
2816 },
2817 [5] = { /* .symtab */
2818 .sh_type = SHT_SYMTAB,
2819 .sh_offset = offsetof(struct ElfImage, sym),
2820 .sh_size = sizeof(img->sym),
2821 .sh_info = 1,
2822 .sh_link = ARRAY_SIZE(img->shdr) - 1,
2823 .sh_entsize = sizeof(ElfW(Sym)),
2824 },
2825 [6] = { /* .strtab */
2826 .sh_type = SHT_STRTAB,
2827 .sh_offset = offsetof(struct ElfImage, str),
2828 .sh_size = sizeof(img->str),
2829 }
2830 },
2831 .sym = {
2832 [1] = { /* code_gen_buffer */
2833 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
2834 .st_shndx = 1,
2835 }
2836 },
2837 .di = {
2838 .len = sizeof(struct DebugInfo) - 4,
2839 .version = 2,
2840 .ptr_size = sizeof(void *),
2841 .cu_die = 1,
2842 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
2843 .fn_die = 2,
2844 .fn_name = "code_gen_buffer"
2845 },
2846 .da = {
2847 1, /* abbrev number (the cu) */
2848 0x11, 1, /* DW_TAG_compile_unit, has children */
2849 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
2850 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2851 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2852 0, 0, /* end of abbrev */
2853 2, /* abbrev number (the fn) */
2854 0x2e, 0, /* DW_TAG_subprogram, no children */
2855 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
2856 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2857 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2858 0, 0, /* end of abbrev */
2859 0 /* no more abbrev */
2860 },
2861 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2862 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2865 /* We only need a single jit entry; statically allocate it. */
2866 static struct jit_code_entry one_entry;
2868 uintptr_t buf = (uintptr_t)buf_ptr;
2869 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2871 img = g_malloc(img_size);
2872 *img = img_template;
2873 memcpy(img + 1, debug_frame, debug_frame_size);
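/* 'img + 1' drops the backend's .debug_frame bytes immediately after
   the fixed ElfImage block, matching shdr[4].sh_offset ==
   sizeof(struct ElfImage) in the template above. */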
2875 img->phdr.p_vaddr = buf;
2876 img->phdr.p_paddr = buf;
2877 img->phdr.p_memsz = buf_size;
2879 img->shdr[1].sh_name = find_string(img->str, ".text");
2880 img->shdr[1].sh_addr = buf;
2881 img->shdr[1].sh_size = buf_size;
2883 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
2884 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
2886 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
2887 img->shdr[4].sh_size = debug_frame_size;
2889 img->shdr[5].sh_name = find_string(img->str, ".symtab");
2890 img->shdr[6].sh_name = find_string(img->str, ".strtab");
2892 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
2893 img->sym[1].st_value = buf;
2894 img->sym[1].st_size = buf_size;
2896 img->di.cu_low_pc = buf;
2897 img->di.cu_high_pc = buf + buf_size;
2898 img->di.fn_low_pc = buf;
2899 img->di.fn_high_pc = buf + buf_size;
2901 #ifdef DEBUG_JIT
2902 /* Enable this block to be able to debug the ELF image file creation.
2903 One can use readelf, objdump, or other inspection utilities. */
2904 {
2905 FILE *f = fopen("/tmp/qemu.jit", "w+b");
2906 if (f) {
2907 if (fwrite(img, img_size, 1, f) != 1) {
2908 /* Avoid stupid unused return value warning for fwrite. */
2909 }
2910 fclose(f);
2911 }
2912 }
2913 #endif
2915 one_entry.symfile_addr = img;
2916 one_entry.symfile_size = img_size;
2918 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
2919 __jit_debug_descriptor.relevant_entry = &one_entry;
2920 __jit_debug_descriptor.first_entry = &one_entry;
2921 __jit_debug_register_code();
2922 }
2923 #else
2924 /* No support for the feature. Provide the entry point expected by exec.c,
2925 and implement the internal function we declared earlier. */
2927 static void tcg_register_jit_int(void *buf, size_t size,
2928 void *debug_frame, size_t debug_frame_size)
2929 {
2930 }
2932 void tcg_register_jit(void *buf, size_t buf_size)
2933 {
2934 }
2935 #endif /* ELF_HOST_MACHINE */