tcg/tcg.c
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
28 #include "config.h"
30 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
31 /* define it to suppress various consistency checks (faster) */
32 #define NDEBUG
33 #endif
35 #include <stdarg.h>
36 #include <stdlib.h>
37 #include <stdio.h>
38 #include <string.h>
39 #include <inttypes.h>
40 #ifdef _WIN32
41 #include <malloc.h>
42 #endif
43 #ifdef _AIX
44 #include <alloca.h>
45 #endif
47 #include "qemu-common.h"
48 #include "cache-utils.h"
49 #include "host-utils.h"
50 #include "qemu-timer.h"
/* Note: the long-term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for the qemu_ld/st
   instructions. */
55 #define NO_CPU_IO_DEFS
56 #include "cpu.h"
57 #include "exec-all.h"
59 #include "tcg-op.h"
60 #include "elf.h"
62 #if defined(CONFIG_USE_GUEST_BASE) && !defined(TCG_TARGET_HAS_GUEST_BASE)
63 #error GUEST_BASE not supported on this host.
64 #endif
66 static void tcg_target_init(TCGContext *s);
67 static void tcg_target_qemu_prologue(TCGContext *s);
68 static void patch_reloc(uint8_t *code_ptr, int type,
69 tcg_target_long value, tcg_target_long addend);
71 static TCGOpDef tcg_op_defs[] = {
72 #define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
73 #include "tcg-opc.h"
74 #undef DEF
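/* Each DEF() line in tcg-opc.h expands to one entry of this table, indexed
   by TCGOpcode: the opcode name, the number of output/input/constant
   arguments, the total argument count and the opcode flags.  As an
   illustration (hypothetical entry), DEF(add_i32, 1, 2, 0, 0) would expand
   to { "add_i32", 1, 2, 0, 3, 0 }. */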
77 static TCGRegSet tcg_target_available_regs[2];
78 static TCGRegSet tcg_target_call_clobber_regs;
80 /* XXX: move that inside the context */
81 uint16_t *gen_opc_ptr;
82 TCGArg *gen_opparam_ptr;
84 static inline void tcg_out8(TCGContext *s, uint8_t v)
86 *s->code_ptr++ = v;
89 static inline void tcg_out16(TCGContext *s, uint16_t v)
91 *(uint16_t *)s->code_ptr = v;
92 s->code_ptr += 2;
95 static inline void tcg_out32(TCGContext *s, uint32_t v)
97 *(uint32_t *)s->code_ptr = v;
98 s->code_ptr += 4;
101 /* label relocation processing */
103 static void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
104 int label_index, long addend)
106 TCGLabel *l;
107 TCGRelocation *r;
109 l = &s->labels[label_index];
110 if (l->has_value) {
111 /* FIXME: This may break relocations on RISC targets that
112 modify instruction fields in place. The caller may not have
113 written the initial value. */
114 patch_reloc(code_ptr, type, l->u.value, addend);
115 } else {
116 /* add a new relocation entry */
117 r = tcg_malloc(sizeof(TCGRelocation));
118 r->type = type;
119 r->ptr = code_ptr;
120 r->addend = addend;
121 r->next = l->u.first_reloc;
122 l->u.first_reloc = r;
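/* tcg_out_reloc() handles forward references: if the label already has a
   value the code is patched immediately, otherwise a TCGRelocation record
   is chained on l->u.first_reloc so that tcg_out_label() below can patch
   every pending site once the label address is known.  Note that
   'first_reloc' and 'value' share the same union, which is why the
   relocation list must be fully consumed before the value is stored. */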
126 static void tcg_out_label(TCGContext *s, int label_index,
127 tcg_target_long value)
129 TCGLabel *l;
130 TCGRelocation *r;
132 l = &s->labels[label_index];
133 if (l->has_value)
134 tcg_abort();
135 r = l->u.first_reloc;
136 while (r != NULL) {
137 patch_reloc(r->ptr, r->type, value, r->addend);
138 r = r->next;
140 l->has_value = 1;
141 l->u.value = value;
144 int gen_new_label(void)
146 TCGContext *s = &tcg_ctx;
147 int idx;
148 TCGLabel *l;
150 if (s->nb_labels >= TCG_MAX_LABELS)
151 tcg_abort();
152 idx = s->nb_labels++;
153 l = &s->labels[idx];
154 l->has_value = 0;
155 l->u.first_reloc = NULL;
156 return idx;
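/* The host backend is included as a source file so that its static code
   emitters (e.g. tcg_out_op(), tcg_target_init(), target_parse_constraint())
   are compiled into this translation unit and can use the inline
   tcg_out8/16/32 helpers above. */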
159 #include "tcg-target.c"
161 /* pool based memory allocation */
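/* Translation-time allocations come from a chain of TCGPool chunks.
   Requests larger than TCG_POOL_CHUNK_SIZE get a dedicated chunk; smaller
   ones bump-allocate from the current chunk (the fast path is expected to
   be the inline tcg_malloc() wrapper in tcg.h, which only calls
   tcg_malloc_internal() when the chunk is exhausted).  tcg_pool_reset()
   merely rewinds the pointers, so pool memory is recycled wholesale at the
   start of each translation instead of being freed individually. */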
162 void *tcg_malloc_internal(TCGContext *s, int size)
164 TCGPool *p;
165 int pool_size;
167 if (size > TCG_POOL_CHUNK_SIZE) {
168 /* big malloc: insert a new pool (XXX: could optimize) */
169 p = qemu_malloc(sizeof(TCGPool) + size);
170 p->size = size;
171 if (s->pool_current)
172 s->pool_current->next = p;
173 else
174 s->pool_first = p;
175 p->next = s->pool_current;
176 } else {
177 p = s->pool_current;
178 if (!p) {
179 p = s->pool_first;
180 if (!p)
181 goto new_pool;
182 } else {
183 if (!p->next) {
184 new_pool:
185 pool_size = TCG_POOL_CHUNK_SIZE;
186 p = qemu_malloc(sizeof(TCGPool) + pool_size);
187 p->size = pool_size;
188 p->next = NULL;
189 if (s->pool_current)
190 s->pool_current->next = p;
191 else
192 s->pool_first = p;
193 } else {
194 p = p->next;
198 s->pool_current = p;
199 s->pool_cur = p->data + size;
200 s->pool_end = p->data + p->size;
201 return p->data;
204 void tcg_pool_reset(TCGContext *s)
206 s->pool_cur = s->pool_end = NULL;
207 s->pool_current = NULL;
210 void tcg_context_init(TCGContext *s)
212 int op, total_args, n;
213 TCGOpDef *def;
214 TCGArgConstraint *args_ct;
215 int *sorted_args;
217 memset(s, 0, sizeof(*s));
218 s->temps = s->static_temps;
219 s->nb_globals = 0;
221 /* Count total number of arguments and allocate the corresponding
222 space */
223 total_args = 0;
224 for(op = 0; op < NB_OPS; op++) {
225 def = &tcg_op_defs[op];
226 n = def->nb_iargs + def->nb_oargs;
227 total_args += n;
230 args_ct = qemu_malloc(sizeof(TCGArgConstraint) * total_args);
231 sorted_args = qemu_malloc(sizeof(int) * total_args);
233 for(op = 0; op < NB_OPS; op++) {
234 def = &tcg_op_defs[op];
235 def->args_ct = args_ct;
236 def->sorted_args = sorted_args;
237 n = def->nb_iargs + def->nb_oargs;
238 sorted_args += n;
239 args_ct += n;
242 tcg_target_init(s);
245 void tcg_prologue_init(TCGContext *s)
247 /* init global prologue and epilogue */
248 s->code_buf = code_gen_prologue;
249 s->code_ptr = s->code_buf;
250 tcg_target_qemu_prologue(s);
251 flush_icache_range((unsigned long)s->code_buf,
252 (unsigned long)s->code_ptr);
255 void tcg_set_frame(TCGContext *s, int reg,
256 tcg_target_long start, tcg_target_long size)
258 s->frame_start = start;
259 s->frame_end = start + size;
260 s->frame_reg = reg;
263 void tcg_func_start(TCGContext *s)
265 int i;
266 tcg_pool_reset(s);
267 s->nb_temps = s->nb_globals;
268 for(i = 0; i < (TCG_TYPE_COUNT * 2); i++)
269 s->first_free_temp[i] = -1;
270 s->labels = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
271 s->nb_labels = 0;
272 s->current_frame_offset = s->frame_start;
274 gen_opc_ptr = gen_opc_buf;
275 gen_opparam_ptr = gen_opparam_buf;
278 static inline void tcg_temp_alloc(TCGContext *s, int n)
280 if (n > TCG_MAX_TEMPS)
281 tcg_abort();
284 static inline int tcg_global_reg_new_internal(TCGType type, int reg,
285 const char *name)
287 TCGContext *s = &tcg_ctx;
288 TCGTemp *ts;
289 int idx;
291 #if TCG_TARGET_REG_BITS == 32
292 if (type != TCG_TYPE_I32)
293 tcg_abort();
294 #endif
295 if (tcg_regset_test_reg(s->reserved_regs, reg))
296 tcg_abort();
297 idx = s->nb_globals;
298 tcg_temp_alloc(s, s->nb_globals + 1);
299 ts = &s->temps[s->nb_globals];
300 ts->base_type = type;
301 ts->type = type;
302 ts->fixed_reg = 1;
303 ts->reg = reg;
304 ts->name = name;
305 s->nb_globals++;
306 tcg_regset_set_reg(s->reserved_regs, reg);
307 return idx;
310 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name)
312 int idx;
314 idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name);
315 return MAKE_TCGV_I32(idx);
318 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name)
320 int idx;
322 idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name);
323 return MAKE_TCGV_I64(idx);
326 static inline int tcg_global_mem_new_internal(TCGType type, int reg,
327 tcg_target_long offset,
328 const char *name)
330 TCGContext *s = &tcg_ctx;
331 TCGTemp *ts;
332 int idx;
334 idx = s->nb_globals;
335 #if TCG_TARGET_REG_BITS == 32
336 if (type == TCG_TYPE_I64) {
337 char buf[64];
338 tcg_temp_alloc(s, s->nb_globals + 2);
339 ts = &s->temps[s->nb_globals];
340 ts->base_type = type;
341 ts->type = TCG_TYPE_I32;
342 ts->fixed_reg = 0;
343 ts->mem_allocated = 1;
344 ts->mem_reg = reg;
345 #ifdef TCG_TARGET_WORDS_BIGENDIAN
346 ts->mem_offset = offset + 4;
347 #else
348 ts->mem_offset = offset;
349 #endif
350 pstrcpy(buf, sizeof(buf), name);
351 pstrcat(buf, sizeof(buf), "_0");
352 ts->name = strdup(buf);
353 ts++;
355 ts->base_type = type;
356 ts->type = TCG_TYPE_I32;
357 ts->fixed_reg = 0;
358 ts->mem_allocated = 1;
359 ts->mem_reg = reg;
360 #ifdef TCG_TARGET_WORDS_BIGENDIAN
361 ts->mem_offset = offset;
362 #else
363 ts->mem_offset = offset + 4;
364 #endif
365 pstrcpy(buf, sizeof(buf), name);
366 pstrcat(buf, sizeof(buf), "_1");
367 ts->name = strdup(buf);
369 s->nb_globals += 2;
370 } else
371 #endif
373 tcg_temp_alloc(s, s->nb_globals + 1);
374 ts = &s->temps[s->nb_globals];
375 ts->base_type = type;
376 ts->type = type;
377 ts->fixed_reg = 0;
378 ts->mem_allocated = 1;
379 ts->mem_reg = reg;
380 ts->mem_offset = offset;
381 ts->name = name;
382 s->nb_globals++;
384 return idx;
387 TCGv_i32 tcg_global_mem_new_i32(int reg, tcg_target_long offset,
388 const char *name)
390 int idx;
392 idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
393 return MAKE_TCGV_I32(idx);
396 TCGv_i64 tcg_global_mem_new_i64(int reg, tcg_target_long offset,
397 const char *name)
399 int idx;
401 idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
402 return MAKE_TCGV_I64(idx);
405 static inline int tcg_temp_new_internal(TCGType type, int temp_local)
407 TCGContext *s = &tcg_ctx;
408 TCGTemp *ts;
409 int idx, k;
411 k = type;
412 if (temp_local)
413 k += TCG_TYPE_COUNT;
414 idx = s->first_free_temp[k];
415 if (idx != -1) {
416 /* There is already an available temp with the
417 right type */
418 ts = &s->temps[idx];
419 s->first_free_temp[k] = ts->next_free_temp;
420 ts->temp_allocated = 1;
421 assert(ts->temp_local == temp_local);
422 } else {
423 idx = s->nb_temps;
424 #if TCG_TARGET_REG_BITS == 32
425 if (type == TCG_TYPE_I64) {
426 tcg_temp_alloc(s, s->nb_temps + 2);
427 ts = &s->temps[s->nb_temps];
428 ts->base_type = type;
429 ts->type = TCG_TYPE_I32;
430 ts->temp_allocated = 1;
431 ts->temp_local = temp_local;
432 ts->name = NULL;
433 ts++;
434 ts->base_type = TCG_TYPE_I32;
435 ts->type = TCG_TYPE_I32;
436 ts->temp_allocated = 1;
437 ts->temp_local = temp_local;
438 ts->name = NULL;
439 s->nb_temps += 2;
440 } else
441 #endif
443 tcg_temp_alloc(s, s->nb_temps + 1);
444 ts = &s->temps[s->nb_temps];
445 ts->base_type = type;
446 ts->type = type;
447 ts->temp_allocated = 1;
448 ts->temp_local = temp_local;
449 ts->name = NULL;
450 s->nb_temps++;
453 return idx;
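/* Temporaries are recycled within a translation: tcg_temp_free_internal()
   below pushes a freed temp onto first_free_temp[k], where k is the TCG
   type offset by TCG_TYPE_COUNT for local temps, and tcg_temp_new_internal()
   above pops from the matching list before growing nb_temps. */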
456 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
458 int idx;
460 idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
461 return MAKE_TCGV_I32(idx);
464 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
466 int idx;
468 idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
469 return MAKE_TCGV_I64(idx);
472 static inline void tcg_temp_free_internal(int idx)
474 TCGContext *s = &tcg_ctx;
475 TCGTemp *ts;
476 int k;
478 assert(idx >= s->nb_globals && idx < s->nb_temps);
479 ts = &s->temps[idx];
480 assert(ts->temp_allocated != 0);
481 ts->temp_allocated = 0;
482 k = ts->base_type;
483 if (ts->temp_local)
484 k += TCG_TYPE_COUNT;
485 ts->next_free_temp = s->first_free_temp[k];
486 s->first_free_temp[k] = idx;
489 void tcg_temp_free_i32(TCGv_i32 arg)
491 tcg_temp_free_internal(GET_TCGV_I32(arg));
494 void tcg_temp_free_i64(TCGv_i64 arg)
496 tcg_temp_free_internal(GET_TCGV_I64(arg));
499 TCGv_i32 tcg_const_i32(int32_t val)
501 TCGv_i32 t0;
502 t0 = tcg_temp_new_i32();
503 tcg_gen_movi_i32(t0, val);
504 return t0;
507 TCGv_i64 tcg_const_i64(int64_t val)
509 TCGv_i64 t0;
510 t0 = tcg_temp_new_i64();
511 tcg_gen_movi_i64(t0, val);
512 return t0;
515 TCGv_i32 tcg_const_local_i32(int32_t val)
517 TCGv_i32 t0;
518 t0 = tcg_temp_local_new_i32();
519 tcg_gen_movi_i32(t0, val);
520 return t0;
523 TCGv_i64 tcg_const_local_i64(int64_t val)
525 TCGv_i64 t0;
526 t0 = tcg_temp_local_new_i64();
527 tcg_gen_movi_i64(t0, val);
528 return t0;
531 void tcg_register_helper(void *func, const char *name)
533 TCGContext *s = &tcg_ctx;
534 int n;
535 if ((s->nb_helpers + 1) > s->allocated_helpers) {
536 n = s->allocated_helpers;
537 if (n == 0) {
538 n = 4;
539 } else {
540 n *= 2;
542 s->helpers = realloc(s->helpers, n * sizeof(TCGHelperInfo));
543 s->allocated_helpers = n;
545 s->helpers[s->nb_helpers].func = (tcg_target_ulong)func;
546 s->helpers[s->nb_helpers].name = name;
547 s->nb_helpers++;
550 /* Note: we convert the 64 bit args to 32 bit and do some alignment
551 and endian swap. Maybe it would be better to do the alignment
552 and endian swap in tcg_reg_alloc_call(). */
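/* Layout of 'sizemask', as used below: bit 0 is set when the return value
   is 64 bits wide; for argument i, bit 2*(i+1) is set when the argument is
   64 bits wide and bit 2*(i+1)+1 when it is signed (bit 1 presumably plays
   the same signedness role for the return value, although it is not tested
   here). */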
553 void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
554 int sizemask, TCGArg ret, int nargs, TCGArg *args)
556 #ifdef TCG_TARGET_I386
557 int call_type;
558 #endif
559 int i;
560 int real_args;
561 int nb_rets;
562 TCGArg *nparam;
564 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
565 for (i = 0; i < nargs; ++i) {
566 int is_64bit = sizemask & (1 << (i+1)*2);
567 int is_signed = sizemask & (2 << (i+1)*2);
568 if (!is_64bit) {
569 TCGv_i64 temp = tcg_temp_new_i64();
570 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
571 if (is_signed) {
572 tcg_gen_ext32s_i64(temp, orig);
573 } else {
574 tcg_gen_ext32u_i64(temp, orig);
576 args[i] = GET_TCGV_I64(temp);
579 #endif /* TCG_TARGET_EXTEND_ARGS */
581 *gen_opc_ptr++ = INDEX_op_call;
582 nparam = gen_opparam_ptr++;
583 #ifdef TCG_TARGET_I386
584 call_type = (flags & TCG_CALL_TYPE_MASK);
585 #endif
586 if (ret != TCG_CALL_DUMMY_ARG) {
587 #if TCG_TARGET_REG_BITS < 64
588 if (sizemask & 1) {
589 #ifdef TCG_TARGET_WORDS_BIGENDIAN
590 *gen_opparam_ptr++ = ret + 1;
591 *gen_opparam_ptr++ = ret;
592 #else
593 *gen_opparam_ptr++ = ret;
594 *gen_opparam_ptr++ = ret + 1;
595 #endif
596 nb_rets = 2;
597 } else
598 #endif
600 *gen_opparam_ptr++ = ret;
601 nb_rets = 1;
603 } else {
604 nb_rets = 0;
606 real_args = 0;
607 for (i = 0; i < nargs; i++) {
608 #if TCG_TARGET_REG_BITS < 64
609 int is_64bit = sizemask & (1 << (i+1)*2);
610 if (is_64bit) {
611 #ifdef TCG_TARGET_I386
612 /* REGPARM case: if the third parameter is 64 bit, it is
613 allocated on the stack */
614 if (i == 2 && call_type == TCG_CALL_TYPE_REGPARM) {
615 call_type = TCG_CALL_TYPE_REGPARM_2;
616 flags = (flags & ~TCG_CALL_TYPE_MASK) | call_type;
618 #endif
619 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
620 /* some targets want aligned 64 bit args */
621 if (real_args & 1) {
622 *gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG;
623 real_args++;
625 #endif
626 /* If stack grows up, then we will be placing successive
627 arguments at lower addresses, which means we need to
628 reverse the order compared to how we would normally
629 treat either big or little-endian. For those arguments
630 that will wind up in registers, this still works for
631 HPPA (the only current STACK_GROWSUP target) since the
632 argument registers are *also* allocated in decreasing
633 order. If another such target is added, this logic may
634 have to get more complicated to differentiate between
635 stack arguments and register arguments. */
636 #if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
637 *gen_opparam_ptr++ = args[i] + 1;
638 *gen_opparam_ptr++ = args[i];
639 #else
640 *gen_opparam_ptr++ = args[i];
641 *gen_opparam_ptr++ = args[i] + 1;
642 #endif
643 real_args += 2;
644 continue;
646 #endif /* TCG_TARGET_REG_BITS < 64 */
648 *gen_opparam_ptr++ = args[i];
649 real_args++;
651 *gen_opparam_ptr++ = GET_TCGV_PTR(func);
653 *gen_opparam_ptr++ = flags;
655 *nparam = (nb_rets << 16) | (real_args + 1);
657 /* total parameters, needed to go backward in the instruction stream */
658 *gen_opparam_ptr++ = 1 + nb_rets + real_args + 3;
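/* The operand words emitted for INDEX_op_call are therefore:
   [nb_rets << 16 | nb_iargs] [return temp(s)] [argument temps, possibly
   split/padded] [function pointer] [flags] [total word count].  The final
   word counts every operand including itself, so passes that scan the
   opcode stream backwards (e.g. the liveness analysis reading args[-1])
   can skip over this variable-length op. */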
660 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
661 for (i = 0; i < nargs; ++i) {
662 int is_64bit = sizemask & (1 << (i+1)*2);
663 if (!is_64bit) {
664 TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
665 tcg_temp_free_i64(temp);
668 #endif /* TCG_TARGET_EXTEND_ARGS */
671 #if TCG_TARGET_REG_BITS == 32
672 void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
673 int c, int right, int arith)
675 if (c == 0) {
676 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
677 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
678 } else if (c >= 32) {
679 c -= 32;
680 if (right) {
681 if (arith) {
682 tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
683 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
684 } else {
685 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
686 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
688 } else {
689 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
690 tcg_gen_movi_i32(TCGV_LOW(ret), 0);
692 } else {
693 TCGv_i32 t0, t1;
695 t0 = tcg_temp_new_i32();
696 t1 = tcg_temp_new_i32();
697 if (right) {
698 tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
699 if (arith)
700 tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
701 else
702 tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
703 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
704 tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
705 tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
706 } else {
707 tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
708 /* Note: ret can be the same as arg1, so we use t1 */
709 tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
710 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
711 tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
712 tcg_gen_mov_i32(TCGV_LOW(ret), t1);
714 tcg_temp_free_i32(t0);
715 tcg_temp_free_i32(t1);
718 #endif
721 static void tcg_reg_alloc_start(TCGContext *s)
723 int i;
724 TCGTemp *ts;
725 for(i = 0; i < s->nb_globals; i++) {
726 ts = &s->temps[i];
727 if (ts->fixed_reg) {
728 ts->val_type = TEMP_VAL_REG;
729 } else {
730 ts->val_type = TEMP_VAL_MEM;
733 for(i = s->nb_globals; i < s->nb_temps; i++) {
734 ts = &s->temps[i];
735 ts->val_type = TEMP_VAL_DEAD;
736 ts->mem_allocated = 0;
737 ts->fixed_reg = 0;
739 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
740 s->reg_to_temp[i] = -1;
744 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
745 int idx)
747 TCGTemp *ts;
749 ts = &s->temps[idx];
750 if (idx < s->nb_globals) {
751 pstrcpy(buf, buf_size, ts->name);
752 } else {
753 if (ts->temp_local)
754 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
755 else
756 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
758 return buf;
761 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg)
763 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg));
766 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
768 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
771 static int helper_cmp(const void *p1, const void *p2)
773 const TCGHelperInfo *th1 = p1;
774 const TCGHelperInfo *th2 = p2;
775 if (th1->func < th2->func)
776 return -1;
777 else if (th1->func == th2->func)
778 return 0;
779 else
780 return 1;
783 /* find helper definition (Note: A hash table would be better) */
784 static TCGHelperInfo *tcg_find_helper(TCGContext *s, tcg_target_ulong val)
786 int m, m_min, m_max;
787 TCGHelperInfo *th;
788 tcg_target_ulong v;
790 if (unlikely(!s->helpers_sorted)) {
791 qsort(s->helpers, s->nb_helpers, sizeof(TCGHelperInfo),
792 helper_cmp);
793 s->helpers_sorted = 1;
796 /* binary search */
797 m_min = 0;
798 m_max = s->nb_helpers - 1;
799 while (m_min <= m_max) {
800 m = (m_min + m_max) >> 1;
801 th = &s->helpers[m];
802 v = th->func;
803 if (v == val)
804 return th;
805 else if (val < v) {
806 m_max = m - 1;
807 } else {
808 m_min = m + 1;
811 return NULL;
814 static const char * const cond_name[] =
816 [TCG_COND_EQ] = "eq",
817 [TCG_COND_NE] = "ne",
818 [TCG_COND_LT] = "lt",
819 [TCG_COND_GE] = "ge",
820 [TCG_COND_LE] = "le",
821 [TCG_COND_GT] = "gt",
822 [TCG_COND_LTU] = "ltu",
823 [TCG_COND_GEU] = "geu",
824 [TCG_COND_LEU] = "leu",
825 [TCG_COND_GTU] = "gtu"
828 void tcg_dump_ops(TCGContext *s, FILE *outfile)
830 const uint16_t *opc_ptr;
831 const TCGArg *args;
832 TCGArg arg;
833 TCGOpcode c;
834 int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn;
835 const TCGOpDef *def;
836 char buf[128];
838 first_insn = 1;
839 opc_ptr = gen_opc_buf;
840 args = gen_opparam_buf;
841 while (opc_ptr < gen_opc_ptr) {
842 c = *opc_ptr++;
843 def = &tcg_op_defs[c];
844 if (c == INDEX_op_debug_insn_start) {
845 uint64_t pc;
846 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
847 pc = ((uint64_t)args[1] << 32) | args[0];
848 #else
849 pc = args[0];
850 #endif
851 if (!first_insn)
852 fprintf(outfile, "\n");
853 fprintf(outfile, " ---- 0x%" PRIx64, pc);
854 first_insn = 0;
855 nb_oargs = def->nb_oargs;
856 nb_iargs = def->nb_iargs;
857 nb_cargs = def->nb_cargs;
858 } else if (c == INDEX_op_call) {
859 TCGArg arg;
861 /* variable number of arguments */
862 arg = *args++;
863 nb_oargs = arg >> 16;
864 nb_iargs = arg & 0xffff;
865 nb_cargs = def->nb_cargs;
867 fprintf(outfile, " %s ", def->name);
869 /* function name */
870 fprintf(outfile, "%s",
871 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[nb_oargs + nb_iargs - 1]));
872 /* flags */
873 fprintf(outfile, ",$0x%" TCG_PRIlx,
874 args[nb_oargs + nb_iargs]);
875 /* nb out args */
876 fprintf(outfile, ",$%d", nb_oargs);
877 for(i = 0; i < nb_oargs; i++) {
878 fprintf(outfile, ",");
879 fprintf(outfile, "%s",
880 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[i]));
882 for(i = 0; i < (nb_iargs - 1); i++) {
883 fprintf(outfile, ",");
884 if (args[nb_oargs + i] == TCG_CALL_DUMMY_ARG) {
885 fprintf(outfile, "<dummy>");
886 } else {
887 fprintf(outfile, "%s",
888 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[nb_oargs + i]));
891 } else if (c == INDEX_op_movi_i32
892 #if TCG_TARGET_REG_BITS == 64
893 || c == INDEX_op_movi_i64
894 #endif
896 tcg_target_ulong val;
897 TCGHelperInfo *th;
899 nb_oargs = def->nb_oargs;
900 nb_iargs = def->nb_iargs;
901 nb_cargs = def->nb_cargs;
902 fprintf(outfile, " %s %s,$", def->name,
903 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0]));
904 val = args[1];
905 th = tcg_find_helper(s, val);
906 if (th) {
907 fprintf(outfile, "%s", th->name);
908 } else {
909 if (c == INDEX_op_movi_i32)
910 fprintf(outfile, "0x%x", (uint32_t)val);
911 else
912 fprintf(outfile, "0x%" PRIx64 , (uint64_t)val);
914 } else {
915 fprintf(outfile, " %s ", def->name);
916 if (c == INDEX_op_nopn) {
917 /* variable number of arguments */
918 nb_cargs = *args;
919 nb_oargs = 0;
920 nb_iargs = 0;
921 } else {
922 nb_oargs = def->nb_oargs;
923 nb_iargs = def->nb_iargs;
924 nb_cargs = def->nb_cargs;
927 k = 0;
928 for(i = 0; i < nb_oargs; i++) {
929 if (k != 0)
930 fprintf(outfile, ",");
931 fprintf(outfile, "%s",
932 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++]));
934 for(i = 0; i < nb_iargs; i++) {
935 if (k != 0)
936 fprintf(outfile, ",");
937 fprintf(outfile, "%s",
938 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++]));
940 switch (c) {
941 case INDEX_op_brcond_i32:
942 #if TCG_TARGET_REG_BITS == 32
943 case INDEX_op_brcond2_i32:
944 #elif TCG_TARGET_REG_BITS == 64
945 case INDEX_op_brcond_i64:
946 #endif
947 case INDEX_op_setcond_i32:
948 #if TCG_TARGET_REG_BITS == 32
949 case INDEX_op_setcond2_i32:
950 #elif TCG_TARGET_REG_BITS == 64
951 case INDEX_op_setcond_i64:
952 #endif
953 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]])
954 fprintf(outfile, ",%s", cond_name[args[k++]]);
955 else
956 fprintf(outfile, ",$0x%" TCG_PRIlx, args[k++]);
957 i = 1;
958 break;
959 default:
960 i = 0;
961 break;
963 for(; i < nb_cargs; i++) {
964 if (k != 0)
965 fprintf(outfile, ",");
966 arg = args[k++];
967 fprintf(outfile, "$0x%" TCG_PRIlx, arg);
970 fprintf(outfile, "\n");
971 args += nb_iargs + nb_oargs + nb_cargs;
/* we give more priority to constraints with fewer registers */
976 static int get_constraint_priority(const TCGOpDef *def, int k)
978 const TCGArgConstraint *arg_ct;
980 int i, n;
981 arg_ct = &def->args_ct[k];
982 if (arg_ct->ct & TCG_CT_ALIAS) {
983 /* an alias is equivalent to a single register */
984 n = 1;
985 } else {
986 if (!(arg_ct->ct & TCG_CT_REG))
987 return 0;
988 n = 0;
989 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
990 if (tcg_regset_test_reg(arg_ct->u.regs, i))
991 n++;
994 return TCG_TARGET_NB_REGS - n + 1;
997 /* sort from highest priority to lowest */
998 static void sort_constraints(TCGOpDef *def, int start, int n)
1000 int i, j, p1, p2, tmp;
1002 for(i = 0; i < n; i++)
1003 def->sorted_args[start + i] = start + i;
1004 if (n <= 1)
1005 return;
1006 for(i = 0; i < n - 1; i++) {
1007 for(j = i + 1; j < n; j++) {
1008 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1009 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1010 if (p1 < p2) {
1011 tmp = def->sorted_args[start + i];
1012 def->sorted_args[start + i] = def->sorted_args[start + j];
1013 def->sorted_args[start + j] = tmp;
1019 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
1021 TCGOpcode op;
1022 TCGOpDef *def;
1023 const char *ct_str;
1024 int i, nb_args;
1026 for(;;) {
1027 if (tdefs->op == (TCGOpcode)-1)
1028 break;
1029 op = tdefs->op;
1030 assert(op >= 0 && op < NB_OPS);
1031 def = &tcg_op_defs[op];
1032 #if defined(CONFIG_DEBUG_TCG)
1033 /* Duplicate entry in op definitions? */
1034 assert(!def->used);
1035 def->used = 1;
1036 #endif
1037 nb_args = def->nb_iargs + def->nb_oargs;
1038 for(i = 0; i < nb_args; i++) {
1039 ct_str = tdefs->args_ct_str[i];
1040 /* Incomplete TCGTargetOpDef entry? */
1041 assert(ct_str != NULL);
1042 tcg_regset_clear(def->args_ct[i].u.regs);
1043 def->args_ct[i].ct = 0;
1044 if (ct_str[0] >= '0' && ct_str[0] <= '9') {
1045 int oarg;
1046 oarg = ct_str[0] - '0';
1047 assert(oarg < def->nb_oargs);
1048 assert(def->args_ct[oarg].ct & TCG_CT_REG);
1049 /* TCG_CT_ALIAS is for the output arguments. The input
1050 argument is tagged with TCG_CT_IALIAS. */
1051 def->args_ct[i] = def->args_ct[oarg];
1052 def->args_ct[oarg].ct = TCG_CT_ALIAS;
1053 def->args_ct[oarg].alias_index = i;
1054 def->args_ct[i].ct |= TCG_CT_IALIAS;
1055 def->args_ct[i].alias_index = oarg;
1056 } else {
1057 for(;;) {
1058 if (*ct_str == '\0')
1059 break;
1060 switch(*ct_str) {
1061 case 'i':
1062 def->args_ct[i].ct |= TCG_CT_CONST;
1063 ct_str++;
1064 break;
1065 default:
1066 if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
1067 fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1068 ct_str, i, def->name);
1069 exit(1);
1076 /* TCGTargetOpDef entry with too much information? */
1077 assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
/* sort the constraints (XXX: this is just a heuristic) */
1080 sort_constraints(def, 0, def->nb_oargs);
1081 sort_constraints(def, def->nb_oargs, def->nb_iargs);
1083 #if 0
1085 int i;
1087 printf("%s: sorted=", def->name);
1088 for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
1089 printf(" %d", def->sorted_args[i]);
1090 printf("\n");
1092 #endif
1093 tdefs++;
1096 #if defined(CONFIG_DEBUG_TCG)
1097 i = 0;
1098 for (op = 0; op < ARRAY_SIZE(tcg_op_defs); op++) {
1099 if (op < INDEX_op_call || op == INDEX_op_debug_insn_start) {
1100 /* Wrong entry in op definitions? */
1101 if (tcg_op_defs[op].used) {
1102 fprintf(stderr, "Invalid op definition for %s\n",
1103 tcg_op_defs[op].name);
1104 i = 1;
1106 } else {
1107 /* Missing entry in op definitions? */
1108 if (!tcg_op_defs[op].used) {
1109 fprintf(stderr, "Missing op definition for %s\n",
1110 tcg_op_defs[op].name);
1111 i = 1;
1115 if (i == 1) {
1116 tcg_abort();
1118 #endif
1121 #ifdef USE_LIVENESS_ANALYSIS
1123 /* set a nop for an operation using 'nb_args' */
1124 static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr,
1125 TCGArg *args, int nb_args)
1127 if (nb_args == 0) {
1128 *opc_ptr = INDEX_op_nop;
1129 } else {
1130 *opc_ptr = INDEX_op_nopn;
1131 args[0] = nb_args;
1132 args[nb_args - 1] = nb_args;
/* liveness analysis: end of function: globals are live, temps are
   dead. */
/* XXX: not used at this stage, as there would be little gain because
   most TBs end with a conditional jump. */
1140 static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps)
1142 memset(dead_temps, 0, s->nb_globals);
1143 memset(dead_temps + s->nb_globals, 1, s->nb_temps - s->nb_globals);
1146 /* liveness analysis: end of basic block: globals are live, temps are
1147 dead, local temps are live. */
1148 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps)
1150 int i;
1151 TCGTemp *ts;
1153 memset(dead_temps, 0, s->nb_globals);
1154 ts = &s->temps[s->nb_globals];
1155 for(i = s->nb_globals; i < s->nb_temps; i++) {
1156 if (ts->temp_local)
1157 dead_temps[i] = 0;
1158 else
1159 dead_temps[i] = 1;
1160 ts++;
/* Liveness analysis: update the op_dead_iargs array to tell whether a
   given input argument is dead. Instructions updating only dead
   temporaries are removed. */
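/* The pass walks the opcode stream backwards, starting from an all-dead
   state for every temporary.  An op whose outputs are all dead and which
   has no side effects is turned into a nop; otherwise its outputs are
   marked dead, globals are revived across calls, clobbering ops and basic
   block ends, and a per-op bitmask in op_dead_iargs records which input
   temporaries die at that op so the register allocator can free their
   registers. */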
1167 static void tcg_liveness_analysis(TCGContext *s)
1169 int i, op_index, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
1170 TCGOpcode op;
1171 TCGArg *args;
1172 const TCGOpDef *def;
1173 uint8_t *dead_temps;
1174 unsigned int dead_iargs;
1176 gen_opc_ptr++; /* skip end */
1178 nb_ops = gen_opc_ptr - gen_opc_buf;
1180 s->op_dead_iargs = tcg_malloc(nb_ops * sizeof(uint16_t));
1182 dead_temps = tcg_malloc(s->nb_temps);
1183 memset(dead_temps, 1, s->nb_temps);
1185 args = gen_opparam_ptr;
1186 op_index = nb_ops - 1;
1187 while (op_index >= 0) {
1188 op = gen_opc_buf[op_index];
1189 def = &tcg_op_defs[op];
1190 switch(op) {
1191 case INDEX_op_call:
1193 int call_flags;
1195 nb_args = args[-1];
1196 args -= nb_args;
1197 nb_iargs = args[0] & 0xffff;
1198 nb_oargs = args[0] >> 16;
1199 args++;
1200 call_flags = args[nb_oargs + nb_iargs];
1202 /* pure functions can be removed if their result is not
1203 used */
1204 if (call_flags & TCG_CALL_PURE) {
1205 for(i = 0; i < nb_oargs; i++) {
1206 arg = args[i];
1207 if (!dead_temps[arg])
1208 goto do_not_remove_call;
1210 tcg_set_nop(s, gen_opc_buf + op_index,
1211 args - 1, nb_args);
1212 } else {
1213 do_not_remove_call:
1215 /* output args are dead */
1216 for(i = 0; i < nb_oargs; i++) {
1217 arg = args[i];
1218 dead_temps[arg] = 1;
1221 if (!(call_flags & TCG_CALL_CONST)) {
1222 /* globals are live (they may be used by the call) */
1223 memset(dead_temps, 0, s->nb_globals);
1226 /* input args are live */
1227 dead_iargs = 0;
1228 for(i = 0; i < nb_iargs; i++) {
1229 arg = args[i + nb_oargs];
1230 if (arg != TCG_CALL_DUMMY_ARG) {
1231 if (dead_temps[arg]) {
1232 dead_iargs |= (1 << i);
1234 dead_temps[arg] = 0;
1237 s->op_dead_iargs[op_index] = dead_iargs;
1239 args--;
1241 break;
1242 case INDEX_op_set_label:
1243 args--;
1244 /* mark end of basic block */
1245 tcg_la_bb_end(s, dead_temps);
1246 break;
1247 case INDEX_op_debug_insn_start:
1248 args -= def->nb_args;
1249 break;
1250 case INDEX_op_nopn:
1251 nb_args = args[-1];
1252 args -= nb_args;
1253 break;
1254 case INDEX_op_discard:
1255 args--;
1256 /* mark the temporary as dead */
1257 dead_temps[args[0]] = 1;
1258 break;
1259 case INDEX_op_end:
1260 break;
1261 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1262 default:
1263 args -= def->nb_args;
1264 nb_iargs = def->nb_iargs;
1265 nb_oargs = def->nb_oargs;
1267 /* Test if the operation can be removed because all
1268 its outputs are dead. We assume that nb_oargs == 0
1269 implies side effects */
1270 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1271 for(i = 0; i < nb_oargs; i++) {
1272 arg = args[i];
1273 if (!dead_temps[arg])
1274 goto do_not_remove;
1276 tcg_set_nop(s, gen_opc_buf + op_index, args, def->nb_args);
1277 #ifdef CONFIG_PROFILER
1278 s->del_op_count++;
1279 #endif
1280 } else {
1281 do_not_remove:
1283 /* output args are dead */
1284 for(i = 0; i < nb_oargs; i++) {
1285 arg = args[i];
1286 dead_temps[arg] = 1;
1289 /* if end of basic block, update */
1290 if (def->flags & TCG_OPF_BB_END) {
1291 tcg_la_bb_end(s, dead_temps);
1292 } else if (def->flags & TCG_OPF_CALL_CLOBBER) {
1293 /* globals are live */
1294 memset(dead_temps, 0, s->nb_globals);
1297 /* input args are live */
1298 dead_iargs = 0;
1299 for(i = 0; i < nb_iargs; i++) {
1300 arg = args[i + nb_oargs];
1301 if (dead_temps[arg]) {
1302 dead_iargs |= (1 << i);
1304 dead_temps[arg] = 0;
1306 s->op_dead_iargs[op_index] = dead_iargs;
1308 break;
1310 op_index--;
1313 if (args != gen_opparam_buf)
1314 tcg_abort();
1316 #else
1317 /* dummy liveness analysis */
1318 static void tcg_liveness_analysis(TCGContext *s)
1320 int nb_ops;
1321 nb_ops = gen_opc_ptr - gen_opc_buf;
1323 s->op_dead_iargs = tcg_malloc(nb_ops * sizeof(uint16_t));
1324 memset(s->op_dead_iargs, 0, nb_ops * sizeof(uint16_t));
1326 #endif
1328 #ifndef NDEBUG
1329 static void dump_regs(TCGContext *s)
1331 TCGTemp *ts;
1332 int i;
1333 char buf[64];
1335 for(i = 0; i < s->nb_temps; i++) {
1336 ts = &s->temps[i];
1337 printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1338 switch(ts->val_type) {
1339 case TEMP_VAL_REG:
1340 printf("%s", tcg_target_reg_names[ts->reg]);
1341 break;
1342 case TEMP_VAL_MEM:
1343 printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
1344 break;
1345 case TEMP_VAL_CONST:
1346 printf("$0x%" TCG_PRIlx, ts->val);
1347 break;
1348 case TEMP_VAL_DEAD:
1349 printf("D");
1350 break;
1351 default:
1352 printf("???");
1353 break;
1355 printf("\n");
1358 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1359 if (s->reg_to_temp[i] >= 0) {
1360 printf("%s: %s\n",
1361 tcg_target_reg_names[i],
1362 tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
1367 static void check_regs(TCGContext *s)
1369 int reg, k;
1370 TCGTemp *ts;
1371 char buf[64];
1373 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1374 k = s->reg_to_temp[reg];
1375 if (k >= 0) {
1376 ts = &s->temps[k];
1377 if (ts->val_type != TEMP_VAL_REG ||
1378 ts->reg != reg) {
1379 printf("Inconsistency for register %s:\n",
1380 tcg_target_reg_names[reg]);
1381 goto fail;
1385 for(k = 0; k < s->nb_temps; k++) {
1386 ts = &s->temps[k];
1387 if (ts->val_type == TEMP_VAL_REG &&
1388 !ts->fixed_reg &&
1389 s->reg_to_temp[ts->reg] != k) {
1390 printf("Inconsistency for temp %s:\n",
1391 tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
1392 fail:
1393 printf("reg state:\n");
1394 dump_regs(s);
1395 tcg_abort();
1399 #endif
1401 static void temp_allocate_frame(TCGContext *s, int temp)
1403 TCGTemp *ts;
1404 ts = &s->temps[temp];
1405 s->current_frame_offset = (s->current_frame_offset + sizeof(tcg_target_long) - 1) & ~(sizeof(tcg_target_long) - 1);
1406 if (s->current_frame_offset + sizeof(tcg_target_long) > s->frame_end)
1407 tcg_abort();
1408 ts->mem_offset = s->current_frame_offset;
1409 ts->mem_reg = s->frame_reg;
1410 ts->mem_allocated = 1;
1411 s->current_frame_offset += sizeof(tcg_target_long);
1414 /* free register 'reg' by spilling the corresponding temporary if necessary */
1415 static void tcg_reg_free(TCGContext *s, int reg)
1417 TCGTemp *ts;
1418 int temp;
1420 temp = s->reg_to_temp[reg];
1421 if (temp != -1) {
1422 ts = &s->temps[temp];
1423 assert(ts->val_type == TEMP_VAL_REG);
1424 if (!ts->mem_coherent) {
1425 if (!ts->mem_allocated)
1426 temp_allocate_frame(s, temp);
1427 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1429 ts->val_type = TEMP_VAL_MEM;
1430 s->reg_to_temp[reg] = -1;
1434 /* Allocate a register belonging to reg1 & ~reg2 */
1435 static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
1437 int i, reg;
1438 TCGRegSet reg_ct;
1440 tcg_regset_andnot(reg_ct, reg1, reg2);
1442 /* first try free registers */
1443 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1444 reg = tcg_target_reg_alloc_order[i];
1445 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
1446 return reg;
1449 /* XXX: do better spill choice */
1450 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1451 reg = tcg_target_reg_alloc_order[i];
1452 if (tcg_regset_test_reg(reg_ct, reg)) {
1453 tcg_reg_free(s, reg);
1454 return reg;
1458 tcg_abort();
/* save a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
1463 static void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
1465 TCGTemp *ts;
1466 int reg;
1468 ts = &s->temps[temp];
1469 if (!ts->fixed_reg) {
1470 switch(ts->val_type) {
1471 case TEMP_VAL_REG:
1472 tcg_reg_free(s, ts->reg);
1473 break;
1474 case TEMP_VAL_DEAD:
1475 ts->val_type = TEMP_VAL_MEM;
1476 break;
1477 case TEMP_VAL_CONST:
1478 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1479 allocated_regs);
1480 if (!ts->mem_allocated)
1481 temp_allocate_frame(s, temp);
1482 tcg_out_movi(s, ts->type, reg, ts->val);
1483 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1484 ts->val_type = TEMP_VAL_MEM;
1485 break;
1486 case TEMP_VAL_MEM:
1487 break;
1488 default:
1489 tcg_abort();
/* save globals to their canonical location and assume they can be
   modified by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
1497 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
1499 int i;
1501 for(i = 0; i < s->nb_globals; i++) {
1502 temp_save(s, i, allocated_regs);
1506 /* at the end of a basic block, we assume all temporaries are dead and
1507 all globals are stored at their canonical location. */
1508 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
1510 TCGTemp *ts;
1511 int i;
1513 for(i = s->nb_globals; i < s->nb_temps; i++) {
1514 ts = &s->temps[i];
1515 if (ts->temp_local) {
1516 temp_save(s, i, allocated_regs);
1517 } else {
1518 if (ts->val_type == TEMP_VAL_REG) {
1519 s->reg_to_temp[ts->reg] = -1;
1521 ts->val_type = TEMP_VAL_DEAD;
1525 save_globals(s, allocated_regs);
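/* IS_DEAD_IARG(n) tests bit n of the dead_iargs mask computed by
   tcg_liveness_analysis(), i.e. whether input argument n of the current op
   is the last use of its temporary, so its register can be reclaimed. */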
1528 #define IS_DEAD_IARG(n) ((dead_iargs >> (n)) & 1)
1530 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args)
1532 TCGTemp *ots;
1533 tcg_target_ulong val;
1535 ots = &s->temps[args[0]];
1536 val = args[1];
1538 if (ots->fixed_reg) {
1539 /* for fixed registers, we do not do any constant
1540 propagation */
1541 tcg_out_movi(s, ots->type, ots->reg, val);
1542 } else {
1543 /* The movi is not explicitly generated here */
1544 if (ots->val_type == TEMP_VAL_REG)
1545 s->reg_to_temp[ots->reg] = -1;
1546 ots->val_type = TEMP_VAL_CONST;
1547 ots->val = val;
1551 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
1552 const TCGArg *args,
1553 unsigned int dead_iargs)
1555 TCGTemp *ts, *ots;
1556 int reg;
1557 const TCGArgConstraint *arg_ct;
1559 ots = &s->temps[args[0]];
1560 ts = &s->temps[args[1]];
1561 arg_ct = &def->args_ct[0];
1563 /* XXX: always mark arg dead if IS_DEAD_IARG(0) */
1564 if (ts->val_type == TEMP_VAL_REG) {
1565 if (IS_DEAD_IARG(0) && !ts->fixed_reg && !ots->fixed_reg) {
1566 /* the mov can be suppressed */
1567 if (ots->val_type == TEMP_VAL_REG)
1568 s->reg_to_temp[ots->reg] = -1;
1569 reg = ts->reg;
1570 s->reg_to_temp[reg] = -1;
1571 ts->val_type = TEMP_VAL_DEAD;
1572 } else {
1573 if (ots->val_type == TEMP_VAL_REG) {
1574 reg = ots->reg;
1575 } else {
1576 reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
1578 if (ts->reg != reg) {
1579 tcg_out_mov(s, ots->type, reg, ts->reg);
1582 } else if (ts->val_type == TEMP_VAL_MEM) {
1583 if (ots->val_type == TEMP_VAL_REG) {
1584 reg = ots->reg;
1585 } else {
1586 reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
1588 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1589 } else if (ts->val_type == TEMP_VAL_CONST) {
1590 if (ots->fixed_reg) {
1591 reg = ots->reg;
1592 tcg_out_movi(s, ots->type, reg, ts->val);
1593 } else {
1594 /* propagate constant */
1595 if (ots->val_type == TEMP_VAL_REG)
1596 s->reg_to_temp[ots->reg] = -1;
1597 ots->val_type = TEMP_VAL_CONST;
1598 ots->val = ts->val;
1599 return;
1601 } else {
1602 tcg_abort();
1604 s->reg_to_temp[reg] = args[0];
1605 ots->reg = reg;
1606 ots->val_type = TEMP_VAL_REG;
1607 ots->mem_coherent = 0;
1610 static void tcg_reg_alloc_op(TCGContext *s,
1611 const TCGOpDef *def, TCGOpcode opc,
1612 const TCGArg *args,
1613 unsigned int dead_iargs)
1615 TCGRegSet allocated_regs;
1616 int i, k, nb_iargs, nb_oargs, reg;
1617 TCGArg arg;
1618 const TCGArgConstraint *arg_ct;
1619 TCGTemp *ts;
1620 TCGArg new_args[TCG_MAX_OP_ARGS];
1621 int const_args[TCG_MAX_OP_ARGS];
1623 nb_oargs = def->nb_oargs;
1624 nb_iargs = def->nb_iargs;
1626 /* copy constants */
1627 memcpy(new_args + nb_oargs + nb_iargs,
1628 args + nb_oargs + nb_iargs,
1629 sizeof(TCGArg) * def->nb_cargs);
1631 /* satisfy input constraints */
1632 tcg_regset_set(allocated_regs, s->reserved_regs);
1633 for(k = 0; k < nb_iargs; k++) {
1634 i = def->sorted_args[nb_oargs + k];
1635 arg = args[i];
1636 arg_ct = &def->args_ct[i];
1637 ts = &s->temps[arg];
1638 if (ts->val_type == TEMP_VAL_MEM) {
1639 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1640 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1641 ts->val_type = TEMP_VAL_REG;
1642 ts->reg = reg;
1643 ts->mem_coherent = 1;
1644 s->reg_to_temp[reg] = arg;
1645 } else if (ts->val_type == TEMP_VAL_CONST) {
1646 if (tcg_target_const_match(ts->val, arg_ct)) {
1647 /* constant is OK for instruction */
1648 const_args[i] = 1;
1649 new_args[i] = ts->val;
1650 goto iarg_end;
1651 } else {
1652 /* need to move to a register */
1653 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1654 tcg_out_movi(s, ts->type, reg, ts->val);
1655 ts->val_type = TEMP_VAL_REG;
1656 ts->reg = reg;
1657 ts->mem_coherent = 0;
1658 s->reg_to_temp[reg] = arg;
1661 assert(ts->val_type == TEMP_VAL_REG);
1662 if (arg_ct->ct & TCG_CT_IALIAS) {
1663 if (ts->fixed_reg) {
1664 /* if fixed register, we must allocate a new register
1665 if the alias is not the same register */
1666 if (arg != args[arg_ct->alias_index])
1667 goto allocate_in_reg;
1668 } else {
1669 /* if the input is aliased to an output and if it is
1670 not dead after the instruction, we must allocate
1671 a new register and move it */
1672 if (!IS_DEAD_IARG(i - nb_oargs))
1673 goto allocate_in_reg;
1676 reg = ts->reg;
1677 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1678 /* nothing to do : the constraint is satisfied */
1679 } else {
1680 allocate_in_reg:
1681 /* allocate a new register matching the constraint
1682 and move the temporary register into it */
1683 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1684 tcg_out_mov(s, ts->type, reg, ts->reg);
1686 new_args[i] = reg;
1687 const_args[i] = 0;
1688 tcg_regset_set_reg(allocated_regs, reg);
1689 iarg_end: ;
1692 if (def->flags & TCG_OPF_BB_END) {
1693 tcg_reg_alloc_bb_end(s, allocated_regs);
1694 } else {
1695 /* mark dead temporaries and free the associated registers */
1696 for(i = 0; i < nb_iargs; i++) {
1697 arg = args[nb_oargs + i];
1698 if (IS_DEAD_IARG(i)) {
1699 ts = &s->temps[arg];
1700 if (!ts->fixed_reg) {
1701 if (ts->val_type == TEMP_VAL_REG)
1702 s->reg_to_temp[ts->reg] = -1;
1703 ts->val_type = TEMP_VAL_DEAD;
1708 if (def->flags & TCG_OPF_CALL_CLOBBER) {
1709 /* XXX: permit generic clobber register list ? */
1710 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1711 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
1712 tcg_reg_free(s, reg);
1715 /* XXX: for load/store we could do that only for the slow path
1716 (i.e. when a memory callback is called) */
/* store globals and free associated registers (we assume the insn
   can modify any global). */
1720 save_globals(s, allocated_regs);
1723 /* satisfy the output constraints */
1724 tcg_regset_set(allocated_regs, s->reserved_regs);
1725 for(k = 0; k < nb_oargs; k++) {
1726 i = def->sorted_args[k];
1727 arg = args[i];
1728 arg_ct = &def->args_ct[i];
1729 ts = &s->temps[arg];
1730 if (arg_ct->ct & TCG_CT_ALIAS) {
1731 reg = new_args[arg_ct->alias_index];
1732 } else {
1733 /* if fixed register, we try to use it */
1734 reg = ts->reg;
1735 if (ts->fixed_reg &&
1736 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1737 goto oarg_end;
1739 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1741 tcg_regset_set_reg(allocated_regs, reg);
1742 /* if a fixed register is used, then a move will be done afterwards */
1743 if (!ts->fixed_reg) {
1744 if (ts->val_type == TEMP_VAL_REG)
1745 s->reg_to_temp[ts->reg] = -1;
1746 ts->val_type = TEMP_VAL_REG;
1747 ts->reg = reg;
1748 /* temp value is modified, so the value kept in memory is
1749 potentially not the same */
1750 ts->mem_coherent = 0;
1751 s->reg_to_temp[reg] = arg;
1753 oarg_end:
1754 new_args[i] = reg;
1758 /* emit instruction */
1759 tcg_out_op(s, opc, new_args, const_args);
1761 /* move the outputs in the correct register if needed */
1762 for(i = 0; i < nb_oargs; i++) {
1763 ts = &s->temps[args[i]];
1764 reg = new_args[i];
1765 if (ts->fixed_reg && ts->reg != reg) {
1766 tcg_out_mov(s, ts->type, ts->reg, reg);
1771 #ifdef TCG_TARGET_STACK_GROWSUP
1772 #define STACK_DIR(x) (-(x))
1773 #else
1774 #define STACK_DIR(x) (x)
1775 #endif
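/* STACK_DIR() negates stack adjustments and offsets on hosts where the
   stack grows upwards (TCG_TARGET_STACK_GROWSUP), so the argument-pushing
   code below works for both stack directions. */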
1777 static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
1778 TCGOpcode opc, const TCGArg *args,
1779 unsigned int dead_iargs)
1781 int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
1782 TCGArg arg, func_arg;
1783 TCGTemp *ts;
1784 tcg_target_long stack_offset, call_stack_size, func_addr;
1785 int const_func_arg, allocate_args;
1786 TCGRegSet allocated_regs;
1787 const TCGArgConstraint *arg_ct;
1789 arg = *args++;
1791 nb_oargs = arg >> 16;
1792 nb_iargs = arg & 0xffff;
1793 nb_params = nb_iargs - 1;
1795 flags = args[nb_oargs + nb_iargs];
1797 nb_regs = tcg_target_get_call_iarg_regs_count(flags);
1798 if (nb_regs > nb_params)
1799 nb_regs = nb_params;
1801 /* assign stack slots first */
1802 /* XXX: preallocate call stack */
1803 call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
1804 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
1805 ~(TCG_TARGET_STACK_ALIGN - 1);
1806 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
1807 if (allocate_args) {
1808 tcg_out_addi(s, TCG_REG_CALL_STACK, -STACK_DIR(call_stack_size));
1811 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
1812 for(i = nb_regs; i < nb_params; i++) {
1813 arg = args[nb_oargs + i];
1814 #ifdef TCG_TARGET_STACK_GROWSUP
1815 stack_offset -= sizeof(tcg_target_long);
1816 #endif
1817 if (arg != TCG_CALL_DUMMY_ARG) {
1818 ts = &s->temps[arg];
1819 if (ts->val_type == TEMP_VAL_REG) {
1820 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
1821 } else if (ts->val_type == TEMP_VAL_MEM) {
1822 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1823 s->reserved_regs);
1824 /* XXX: not correct if reading values from the stack */
1825 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1826 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
1827 } else if (ts->val_type == TEMP_VAL_CONST) {
1828 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1829 s->reserved_regs);
1830 /* XXX: sign extend may be needed on some targets */
1831 tcg_out_movi(s, ts->type, reg, ts->val);
1832 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
1833 } else {
1834 tcg_abort();
1837 #ifndef TCG_TARGET_STACK_GROWSUP
1838 stack_offset += sizeof(tcg_target_long);
1839 #endif
1842 /* assign input registers */
1843 tcg_regset_set(allocated_regs, s->reserved_regs);
1844 for(i = 0; i < nb_regs; i++) {
1845 arg = args[nb_oargs + i];
1846 if (arg != TCG_CALL_DUMMY_ARG) {
1847 ts = &s->temps[arg];
1848 reg = tcg_target_call_iarg_regs[i];
1849 tcg_reg_free(s, reg);
1850 if (ts->val_type == TEMP_VAL_REG) {
1851 if (ts->reg != reg) {
1852 tcg_out_mov(s, ts->type, reg, ts->reg);
1854 } else if (ts->val_type == TEMP_VAL_MEM) {
1855 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1856 } else if (ts->val_type == TEMP_VAL_CONST) {
1857 /* XXX: sign extend ? */
1858 tcg_out_movi(s, ts->type, reg, ts->val);
1859 } else {
1860 tcg_abort();
1862 tcg_regset_set_reg(allocated_regs, reg);
1866 /* assign function address */
1867 func_arg = args[nb_oargs + nb_iargs - 1];
1868 arg_ct = &def->args_ct[0];
1869 ts = &s->temps[func_arg];
1870 func_addr = ts->val;
1871 const_func_arg = 0;
1872 if (ts->val_type == TEMP_VAL_MEM) {
1873 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1874 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1875 func_arg = reg;
1876 tcg_regset_set_reg(allocated_regs, reg);
1877 } else if (ts->val_type == TEMP_VAL_REG) {
1878 reg = ts->reg;
1879 if (!tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1880 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1881 tcg_out_mov(s, ts->type, reg, ts->reg);
1883 func_arg = reg;
1884 tcg_regset_set_reg(allocated_regs, reg);
1885 } else if (ts->val_type == TEMP_VAL_CONST) {
1886 if (tcg_target_const_match(func_addr, arg_ct)) {
1887 const_func_arg = 1;
1888 func_arg = func_addr;
1889 } else {
1890 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1891 tcg_out_movi(s, ts->type, reg, func_addr);
1892 func_arg = reg;
1893 tcg_regset_set_reg(allocated_regs, reg);
1895 } else {
1896 tcg_abort();
1900 /* mark dead temporaries and free the associated registers */
1901 for(i = 0; i < nb_iargs; i++) {
1902 arg = args[nb_oargs + i];
1903 if (IS_DEAD_IARG(i)) {
1904 ts = &s->temps[arg];
1905 if (!ts->fixed_reg) {
1906 if (ts->val_type == TEMP_VAL_REG)
1907 s->reg_to_temp[ts->reg] = -1;
1908 ts->val_type = TEMP_VAL_DEAD;
1913 /* clobber call registers */
1914 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1915 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
1916 tcg_reg_free(s, reg);
/* store globals and free associated registers (we assume the call
   can modify any global). */
1922 if (!(flags & TCG_CALL_CONST)) {
1923 save_globals(s, allocated_regs);
1926 tcg_out_op(s, opc, &func_arg, &const_func_arg);
1928 if (allocate_args) {
1929 tcg_out_addi(s, TCG_REG_CALL_STACK, STACK_DIR(call_stack_size));
1932 /* assign output registers and emit moves if needed */
1933 for(i = 0; i < nb_oargs; i++) {
1934 arg = args[i];
1935 ts = &s->temps[arg];
1936 reg = tcg_target_call_oarg_regs[i];
1937 assert(s->reg_to_temp[reg] == -1);
1938 if (ts->fixed_reg) {
1939 if (ts->reg != reg) {
1940 tcg_out_mov(s, ts->type, ts->reg, reg);
1942 } else {
1943 if (ts->val_type == TEMP_VAL_REG)
1944 s->reg_to_temp[ts->reg] = -1;
1945 ts->val_type = TEMP_VAL_REG;
1946 ts->reg = reg;
1947 ts->mem_coherent = 0;
1948 s->reg_to_temp[reg] = arg;
1952 return nb_iargs + nb_oargs + def->nb_cargs + 1;
1955 #ifdef CONFIG_PROFILER
1957 static int64_t tcg_table_op_count[NB_OPS];
1959 static void dump_op_count(void)
1961 int i;
1962 FILE *f;
1963 f = fopen("/tmp/op.log", "w");
1964 for(i = INDEX_op_end; i < NB_OPS; i++) {
1965 fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, tcg_table_op_count[i]);
1967 fclose(f);
1969 #endif
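/* tcg_gen_code_common() translates the ops in gen_opc_buf into host code at
   gen_code_buf.  With search_pc < 0 it emits everything and returns -1;
   with search_pc >= 0 it stops as soon as more than search_pc bytes of host
   code have been emitted and returns the index of the current op, which
   callers use to recover the guest state for a given host pc. */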
1972 static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
1973 long search_pc)
1975 TCGOpcode opc;
1976 int op_index;
1977 const TCGOpDef *def;
1978 unsigned int dead_iargs;
1979 const TCGArg *args;
1981 #ifdef DEBUG_DISAS
1982 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
1983 qemu_log("OP:\n");
1984 tcg_dump_ops(s, logfile);
1985 qemu_log("\n");
1987 #endif
1989 #ifdef CONFIG_PROFILER
1990 s->la_time -= profile_getclock();
1991 #endif
1992 tcg_liveness_analysis(s);
1993 #ifdef CONFIG_PROFILER
1994 s->la_time += profile_getclock();
1995 #endif
1997 #ifdef DEBUG_DISAS
1998 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
1999 qemu_log("OP after liveness analysis:\n");
2000 tcg_dump_ops(s, logfile);
2001 qemu_log("\n");
2003 #endif
2005 tcg_reg_alloc_start(s);
2007 s->code_buf = gen_code_buf;
2008 s->code_ptr = gen_code_buf;
2010 args = gen_opparam_buf;
2011 op_index = 0;
2013 for(;;) {
2014 opc = gen_opc_buf[op_index];
2015 #ifdef CONFIG_PROFILER
2016 tcg_table_op_count[opc]++;
2017 #endif
2018 def = &tcg_op_defs[opc];
2019 #if 0
2020 printf("%s: %d %d %d\n", def->name,
2021 def->nb_oargs, def->nb_iargs, def->nb_cargs);
2022 // dump_regs(s);
2023 #endif
2024 switch(opc) {
2025 case INDEX_op_mov_i32:
2026 #if TCG_TARGET_REG_BITS == 64
2027 case INDEX_op_mov_i64:
2028 #endif
2029 dead_iargs = s->op_dead_iargs[op_index];
2030 tcg_reg_alloc_mov(s, def, args, dead_iargs);
2031 break;
2032 case INDEX_op_movi_i32:
2033 #if TCG_TARGET_REG_BITS == 64
2034 case INDEX_op_movi_i64:
2035 #endif
2036 tcg_reg_alloc_movi(s, args);
2037 break;
2038 case INDEX_op_debug_insn_start:
2039 /* debug instruction */
2040 break;
2041 case INDEX_op_nop:
2042 case INDEX_op_nop1:
2043 case INDEX_op_nop2:
2044 case INDEX_op_nop3:
2045 break;
2046 case INDEX_op_nopn:
2047 args += args[0];
2048 goto next;
2049 case INDEX_op_discard:
2051 TCGTemp *ts;
2052 ts = &s->temps[args[0]];
2053 /* mark the temporary as dead */
2054 if (!ts->fixed_reg) {
2055 if (ts->val_type == TEMP_VAL_REG)
2056 s->reg_to_temp[ts->reg] = -1;
2057 ts->val_type = TEMP_VAL_DEAD;
2060 break;
2061 case INDEX_op_set_label:
2062 tcg_reg_alloc_bb_end(s, s->reserved_regs);
2063 tcg_out_label(s, args[0], (long)s->code_ptr);
2064 break;
2065 case INDEX_op_call:
2066 dead_iargs = s->op_dead_iargs[op_index];
2067 args += tcg_reg_alloc_call(s, def, opc, args, dead_iargs);
2068 goto next;
2069 case INDEX_op_end:
2070 goto the_end;
2071 default:
/* Note: it would be much faster to have specialized register
   allocator functions for some common argument patterns */
2075 dead_iargs = s->op_dead_iargs[op_index];
2076 tcg_reg_alloc_op(s, def, opc, args, dead_iargs);
2077 break;
2079 args += def->nb_args;
2080 next:
2081 if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
2082 return op_index;
2084 op_index++;
2085 #ifndef NDEBUG
2086 check_regs(s);
2087 #endif
2089 the_end:
2090 return -1;
2093 int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf)
2095 #ifdef CONFIG_PROFILER
2097 int n;
2098 n = (gen_opc_ptr - gen_opc_buf);
2099 s->op_count += n;
2100 if (n > s->op_count_max)
2101 s->op_count_max = n;
2103 s->temp_count += s->nb_temps;
2104 if (s->nb_temps > s->temp_count_max)
2105 s->temp_count_max = s->nb_temps;
2107 #endif
2109 tcg_gen_code_common(s, gen_code_buf, -1);
2111 /* flush instruction cache */
2112 flush_icache_range((unsigned long)gen_code_buf,
2113 (unsigned long)s->code_ptr);
2114 return s->code_ptr - gen_code_buf;
/* Return the index of the micro operation whose output code contains the
   host pc 'offset' bytes from the start of the TB, i.e. the first op after
   which more than 'offset' bytes of code have been generated. The contents
   of gen_code_buf must not be changed, though writing the same values is ok.
   Return -1 if not found. */
2121 int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
2123 return tcg_gen_code_common(s, gen_code_buf, offset);
2126 #ifdef CONFIG_PROFILER
2127 void tcg_dump_info(FILE *f,
2128 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2130 TCGContext *s = &tcg_ctx;
2131 int64_t tot;
2133 tot = s->interm_time + s->code_time;
2134 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2135 tot, tot / 2.4e9);
2136 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2137 s->tb_count,
2138 s->tb_count1 - s->tb_count,
2139 s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0);
2140 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2141 s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max);
2142 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2143 s->tb_count ?
2144 (double)s->del_op_count / s->tb_count : 0);
2145 cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
2146 s->tb_count ?
2147 (double)s->temp_count / s->tb_count : 0,
2148 s->temp_count_max);
2150 cpu_fprintf(f, "cycles/op %0.1f\n",
2151 s->op_count ? (double)tot / s->op_count : 0);
2152 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2153 s->code_in_len ? (double)tot / s->code_in_len : 0);
2154 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2155 s->code_out_len ? (double)tot / s->code_out_len : 0);
2156 if (tot == 0)
2157 tot = 1;
2158 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2159 (double)s->interm_time / tot * 100.0);
2160 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2161 (double)s->code_time / tot * 100.0);
2162 cpu_fprintf(f, "liveness/code time %0.1f%%\n",
2163 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2164 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2165 s->restore_count);
2166 cpu_fprintf(f, " avg cycles %0.1f\n",
2167 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2169 dump_op_count();
2171 #else
2172 void tcg_dump_info(FILE *f,
2173 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2175 cpu_fprintf(f, "[TCG profiler not compiled]\n");
2177 #endif