2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
31 /* Define to dump the ELF file used to communicate with GDB. */
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
39 #include "qemu-common.h"
40 #include "cache-utils.h"
41 #include "host-utils.h"
42 #include "qemu-timer.h"
44 /* Note: the long term plan is to reduce the dependencies on the QEMU
45 CPU definitions. Currently they are used for qemu_ld/st
47 #define NO_CPU_IO_DEFS
52 #if TCG_TARGET_REG_BITS == 64
53 # define ELF_CLASS ELFCLASS64
55 # define ELF_CLASS ELFCLASS32
57 #ifdef HOST_WORDS_BIGENDIAN
58 # define ELF_DATA ELFDATA2MSB
60 # define ELF_DATA ELFDATA2LSB
65 #if defined(CONFIG_USE_GUEST_BASE) && !defined(TCG_TARGET_HAS_GUEST_BASE)
66 #error GUEST_BASE not supported on this host.
69 /* Forward declarations for functions declared in tcg-target.c and used here. */
70 static void tcg_target_init(TCGContext
*s
);
71 static void tcg_target_qemu_prologue(TCGContext
*s
);
72 static void patch_reloc(uint8_t *code_ptr
, int type
,
73 tcg_target_long value
, tcg_target_long addend
);
75 static void tcg_register_jit_int(void *buf
, size_t size
,
76 void *debug_frame
, size_t debug_frame_size
)
77 __attribute__((unused
));
79 /* Forward declarations for functions declared and used in tcg-target.c. */
80 static int target_parse_constraint(TCGArgConstraint
*ct
, const char **pct_str
);
81 static void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg ret
, TCGReg arg1
,
82 tcg_target_long arg2
);
83 static void tcg_out_mov(TCGContext
*s
, TCGType type
, TCGReg ret
, TCGReg arg
);
84 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
85 TCGReg ret
, tcg_target_long arg
);
86 static void tcg_out_op(TCGContext
*s
, TCGOpcode opc
, const TCGArg
*args
,
87 const int *const_args
);
88 static void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg arg
, TCGReg arg1
,
89 tcg_target_long arg2
);
90 static int tcg_target_const_match(tcg_target_long val
,
91 const TCGArgConstraint
*arg_ct
);
92 static int tcg_target_get_call_iarg_regs_count(int flags
);
94 TCGOpDef tcg_op_defs
[] = {
95 #define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
99 const size_t tcg_op_defs_max
= ARRAY_SIZE(tcg_op_defs
);
101 static TCGRegSet tcg_target_available_regs
[2];
102 static TCGRegSet tcg_target_call_clobber_regs
;
104 /* XXX: move that inside the context */
105 uint16_t *gen_opc_ptr
;
106 TCGArg
*gen_opparam_ptr
;
108 static inline void tcg_out8(TCGContext
*s
, uint8_t v
)
113 static inline void tcg_out16(TCGContext
*s
, uint16_t v
)
115 *(uint16_t *)s
->code_ptr
= v
;
119 static inline void tcg_out32(TCGContext
*s
, uint32_t v
)
121 *(uint32_t *)s
->code_ptr
= v
;
125 /* label relocation processing */
127 static void tcg_out_reloc(TCGContext
*s
, uint8_t *code_ptr
, int type
,
128 int label_index
, long addend
)
133 l
= &s
->labels
[label_index
];
135 /* FIXME: This may break relocations on RISC targets that
136 modify instruction fields in place. The caller may not have
137 written the initial value. */
138 patch_reloc(code_ptr
, type
, l
->u
.value
, addend
);
140 /* add a new relocation entry */
141 r
= tcg_malloc(sizeof(TCGRelocation
));
145 r
->next
= l
->u
.first_reloc
;
146 l
->u
.first_reloc
= r
;
150 static void tcg_out_label(TCGContext
*s
, int label_index
, void *ptr
)
154 tcg_target_long value
= (tcg_target_long
)ptr
;
156 l
= &s
->labels
[label_index
];
159 r
= l
->u
.first_reloc
;
161 patch_reloc(r
->ptr
, r
->type
, value
, r
->addend
);
168 int gen_new_label(void)
170 TCGContext
*s
= &tcg_ctx
;
174 if (s
->nb_labels
>= TCG_MAX_LABELS
)
176 idx
= s
->nb_labels
++;
179 l
->u
.first_reloc
= NULL
;
183 #include "tcg-target.c"
185 /* pool based memory allocation */
186 void *tcg_malloc_internal(TCGContext
*s
, int size
)
191 if (size
> TCG_POOL_CHUNK_SIZE
) {
192 /* big malloc: insert a new pool (XXX: could optimize) */
193 p
= g_malloc(sizeof(TCGPool
) + size
);
195 p
->next
= s
->pool_first_large
;
196 s
->pool_first_large
= p
;
207 pool_size
= TCG_POOL_CHUNK_SIZE
;
208 p
= g_malloc(sizeof(TCGPool
) + pool_size
);
212 s
->pool_current
->next
= p
;
221 s
->pool_cur
= p
->data
+ size
;
222 s
->pool_end
= p
->data
+ p
->size
;
226 void tcg_pool_reset(TCGContext
*s
)
229 for (p
= s
->pool_first_large
; p
; p
= t
) {
233 s
->pool_first_large
= NULL
;
234 s
->pool_cur
= s
->pool_end
= NULL
;
235 s
->pool_current
= NULL
;
238 void tcg_context_init(TCGContext
*s
)
240 int op
, total_args
, n
;
242 TCGArgConstraint
*args_ct
;
245 memset(s
, 0, sizeof(*s
));
246 s
->temps
= s
->static_temps
;
249 /* Count total number of arguments and allocate the corresponding
252 for(op
= 0; op
< NB_OPS
; op
++) {
253 def
= &tcg_op_defs
[op
];
254 n
= def
->nb_iargs
+ def
->nb_oargs
;
258 args_ct
= g_malloc(sizeof(TCGArgConstraint
) * total_args
);
259 sorted_args
= g_malloc(sizeof(int) * total_args
);
261 for(op
= 0; op
< NB_OPS
; op
++) {
262 def
= &tcg_op_defs
[op
];
263 def
->args_ct
= args_ct
;
264 def
->sorted_args
= sorted_args
;
265 n
= def
->nb_iargs
+ def
->nb_oargs
;
273 void tcg_prologue_init(TCGContext
*s
)
275 /* init global prologue and epilogue */
276 s
->code_buf
= code_gen_prologue
;
277 s
->code_ptr
= s
->code_buf
;
278 tcg_target_qemu_prologue(s
);
279 flush_icache_range((tcg_target_ulong
)s
->code_buf
,
280 (tcg_target_ulong
)s
->code_ptr
);
283 void tcg_set_frame(TCGContext
*s
, int reg
,
284 tcg_target_long start
, tcg_target_long size
)
286 s
->frame_start
= start
;
287 s
->frame_end
= start
+ size
;
291 void tcg_func_start(TCGContext
*s
)
295 s
->nb_temps
= s
->nb_globals
;
296 for(i
= 0; i
< (TCG_TYPE_COUNT
* 2); i
++)
297 s
->first_free_temp
[i
] = -1;
298 s
->labels
= tcg_malloc(sizeof(TCGLabel
) * TCG_MAX_LABELS
);
300 s
->current_frame_offset
= s
->frame_start
;
302 gen_opc_ptr
= gen_opc_buf
;
303 gen_opparam_ptr
= gen_opparam_buf
;
306 static inline void tcg_temp_alloc(TCGContext
*s
, int n
)
308 if (n
> TCG_MAX_TEMPS
)
312 static inline int tcg_global_reg_new_internal(TCGType type
, int reg
,
315 TCGContext
*s
= &tcg_ctx
;
319 #if TCG_TARGET_REG_BITS == 32
320 if (type
!= TCG_TYPE_I32
)
323 if (tcg_regset_test_reg(s
->reserved_regs
, reg
))
326 tcg_temp_alloc(s
, s
->nb_globals
+ 1);
327 ts
= &s
->temps
[s
->nb_globals
];
328 ts
->base_type
= type
;
334 tcg_regset_set_reg(s
->reserved_regs
, reg
);
338 TCGv_i32
tcg_global_reg_new_i32(int reg
, const char *name
)
342 idx
= tcg_global_reg_new_internal(TCG_TYPE_I32
, reg
, name
);
343 return MAKE_TCGV_I32(idx
);
346 TCGv_i64
tcg_global_reg_new_i64(int reg
, const char *name
)
350 idx
= tcg_global_reg_new_internal(TCG_TYPE_I64
, reg
, name
);
351 return MAKE_TCGV_I64(idx
);
354 static inline int tcg_global_mem_new_internal(TCGType type
, int reg
,
355 tcg_target_long offset
,
358 TCGContext
*s
= &tcg_ctx
;
363 #if TCG_TARGET_REG_BITS == 32
364 if (type
== TCG_TYPE_I64
) {
366 tcg_temp_alloc(s
, s
->nb_globals
+ 2);
367 ts
= &s
->temps
[s
->nb_globals
];
368 ts
->base_type
= type
;
369 ts
->type
= TCG_TYPE_I32
;
371 ts
->mem_allocated
= 1;
373 #ifdef TCG_TARGET_WORDS_BIGENDIAN
374 ts
->mem_offset
= offset
+ 4;
376 ts
->mem_offset
= offset
;
378 pstrcpy(buf
, sizeof(buf
), name
);
379 pstrcat(buf
, sizeof(buf
), "_0");
380 ts
->name
= strdup(buf
);
383 ts
->base_type
= type
;
384 ts
->type
= TCG_TYPE_I32
;
386 ts
->mem_allocated
= 1;
388 #ifdef TCG_TARGET_WORDS_BIGENDIAN
389 ts
->mem_offset
= offset
;
391 ts
->mem_offset
= offset
+ 4;
393 pstrcpy(buf
, sizeof(buf
), name
);
394 pstrcat(buf
, sizeof(buf
), "_1");
395 ts
->name
= strdup(buf
);
401 tcg_temp_alloc(s
, s
->nb_globals
+ 1);
402 ts
= &s
->temps
[s
->nb_globals
];
403 ts
->base_type
= type
;
406 ts
->mem_allocated
= 1;
408 ts
->mem_offset
= offset
;
415 TCGv_i32
tcg_global_mem_new_i32(int reg
, tcg_target_long offset
,
420 idx
= tcg_global_mem_new_internal(TCG_TYPE_I32
, reg
, offset
, name
);
421 return MAKE_TCGV_I32(idx
);
424 TCGv_i64
tcg_global_mem_new_i64(int reg
, tcg_target_long offset
,
429 idx
= tcg_global_mem_new_internal(TCG_TYPE_I64
, reg
, offset
, name
);
430 return MAKE_TCGV_I64(idx
);
433 static inline int tcg_temp_new_internal(TCGType type
, int temp_local
)
435 TCGContext
*s
= &tcg_ctx
;
442 idx
= s
->first_free_temp
[k
];
444 /* There is already an available temp with the
447 s
->first_free_temp
[k
] = ts
->next_free_temp
;
448 ts
->temp_allocated
= 1;
449 assert(ts
->temp_local
== temp_local
);
452 #if TCG_TARGET_REG_BITS == 32
453 if (type
== TCG_TYPE_I64
) {
454 tcg_temp_alloc(s
, s
->nb_temps
+ 2);
455 ts
= &s
->temps
[s
->nb_temps
];
456 ts
->base_type
= type
;
457 ts
->type
= TCG_TYPE_I32
;
458 ts
->temp_allocated
= 1;
459 ts
->temp_local
= temp_local
;
462 ts
->base_type
= TCG_TYPE_I32
;
463 ts
->type
= TCG_TYPE_I32
;
464 ts
->temp_allocated
= 1;
465 ts
->temp_local
= temp_local
;
471 tcg_temp_alloc(s
, s
->nb_temps
+ 1);
472 ts
= &s
->temps
[s
->nb_temps
];
473 ts
->base_type
= type
;
475 ts
->temp_allocated
= 1;
476 ts
->temp_local
= temp_local
;
482 #if defined(CONFIG_DEBUG_TCG)
488 TCGv_i32
tcg_temp_new_internal_i32(int temp_local
)
492 idx
= tcg_temp_new_internal(TCG_TYPE_I32
, temp_local
);
493 return MAKE_TCGV_I32(idx
);
496 TCGv_i64
tcg_temp_new_internal_i64(int temp_local
)
500 idx
= tcg_temp_new_internal(TCG_TYPE_I64
, temp_local
);
501 return MAKE_TCGV_I64(idx
);
504 static inline void tcg_temp_free_internal(int idx
)
506 TCGContext
*s
= &tcg_ctx
;
510 #if defined(CONFIG_DEBUG_TCG)
512 if (s
->temps_in_use
< 0) {
513 fprintf(stderr
, "More temporaries freed than allocated!\n");
517 assert(idx
>= s
->nb_globals
&& idx
< s
->nb_temps
);
519 assert(ts
->temp_allocated
!= 0);
520 ts
->temp_allocated
= 0;
524 ts
->next_free_temp
= s
->first_free_temp
[k
];
525 s
->first_free_temp
[k
] = idx
;
528 void tcg_temp_free_i32(TCGv_i32 arg
)
530 tcg_temp_free_internal(GET_TCGV_I32(arg
));
533 void tcg_temp_free_i64(TCGv_i64 arg
)
535 tcg_temp_free_internal(GET_TCGV_I64(arg
));
538 TCGv_i32
tcg_const_i32(int32_t val
)
541 t0
= tcg_temp_new_i32();
542 tcg_gen_movi_i32(t0
, val
);
546 TCGv_i64
tcg_const_i64(int64_t val
)
549 t0
= tcg_temp_new_i64();
550 tcg_gen_movi_i64(t0
, val
);
554 TCGv_i32
tcg_const_local_i32(int32_t val
)
557 t0
= tcg_temp_local_new_i32();
558 tcg_gen_movi_i32(t0
, val
);
562 TCGv_i64
tcg_const_local_i64(int64_t val
)
565 t0
= tcg_temp_local_new_i64();
566 tcg_gen_movi_i64(t0
, val
);
570 #if defined(CONFIG_DEBUG_TCG)
571 void tcg_clear_temp_count(void)
573 TCGContext
*s
= &tcg_ctx
;
577 int tcg_check_temp_count(void)
579 TCGContext
*s
= &tcg_ctx
;
580 if (s
->temps_in_use
) {
581 /* Clear the count so that we don't give another
582 * warning immediately next time around.
591 void tcg_register_helper(void *func
, const char *name
)
593 TCGContext
*s
= &tcg_ctx
;
595 if ((s
->nb_helpers
+ 1) > s
->allocated_helpers
) {
596 n
= s
->allocated_helpers
;
602 s
->helpers
= realloc(s
->helpers
, n
* sizeof(TCGHelperInfo
));
603 s
->allocated_helpers
= n
;
605 s
->helpers
[s
->nb_helpers
].func
= (tcg_target_ulong
)func
;
606 s
->helpers
[s
->nb_helpers
].name
= name
;
610 /* Note: we convert the 64 bit args to 32 bit and do some alignment
611 and endian swap. Maybe it would be better to do the alignment
612 and endian swap in tcg_reg_alloc_call(). */
613 void tcg_gen_callN(TCGContext
*s
, TCGv_ptr func
, unsigned int flags
,
614 int sizemask
, TCGArg ret
, int nargs
, TCGArg
*args
)
621 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
622 for (i
= 0; i
< nargs
; ++i
) {
623 int is_64bit
= sizemask
& (1 << (i
+1)*2);
624 int is_signed
= sizemask
& (2 << (i
+1)*2);
626 TCGv_i64 temp
= tcg_temp_new_i64();
627 TCGv_i64 orig
= MAKE_TCGV_I64(args
[i
]);
629 tcg_gen_ext32s_i64(temp
, orig
);
631 tcg_gen_ext32u_i64(temp
, orig
);
633 args
[i
] = GET_TCGV_I64(temp
);
636 #endif /* TCG_TARGET_EXTEND_ARGS */
638 *gen_opc_ptr
++ = INDEX_op_call
;
639 nparam
= gen_opparam_ptr
++;
640 if (ret
!= TCG_CALL_DUMMY_ARG
) {
641 #if TCG_TARGET_REG_BITS < 64
643 #ifdef TCG_TARGET_WORDS_BIGENDIAN
644 *gen_opparam_ptr
++ = ret
+ 1;
645 *gen_opparam_ptr
++ = ret
;
647 *gen_opparam_ptr
++ = ret
;
648 *gen_opparam_ptr
++ = ret
+ 1;
654 *gen_opparam_ptr
++ = ret
;
661 for (i
= 0; i
< nargs
; i
++) {
662 #if TCG_TARGET_REG_BITS < 64
663 int is_64bit
= sizemask
& (1 << (i
+1)*2);
665 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
666 /* some targets want aligned 64 bit args */
668 *gen_opparam_ptr
++ = TCG_CALL_DUMMY_ARG
;
672 /* If stack grows up, then we will be placing successive
673 arguments at lower addresses, which means we need to
674 reverse the order compared to how we would normally
675 treat either big or little-endian. For those arguments
676 that will wind up in registers, this still works for
677 HPPA (the only current STACK_GROWSUP target) since the
678 argument registers are *also* allocated in decreasing
679 order. If another such target is added, this logic may
680 have to get more complicated to differentiate between
681 stack arguments and register arguments. */
682 #if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
683 *gen_opparam_ptr
++ = args
[i
] + 1;
684 *gen_opparam_ptr
++ = args
[i
];
686 *gen_opparam_ptr
++ = args
[i
];
687 *gen_opparam_ptr
++ = args
[i
] + 1;
692 #endif /* TCG_TARGET_REG_BITS < 64 */
694 *gen_opparam_ptr
++ = args
[i
];
697 *gen_opparam_ptr
++ = GET_TCGV_PTR(func
);
699 *gen_opparam_ptr
++ = flags
;
701 *nparam
= (nb_rets
<< 16) | (real_args
+ 1);
703 /* total parameters, needed to go backward in the instruction stream */
704 *gen_opparam_ptr
++ = 1 + nb_rets
+ real_args
+ 3;
706 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
707 for (i
= 0; i
< nargs
; ++i
) {
708 int is_64bit
= sizemask
& (1 << (i
+1)*2);
710 TCGv_i64 temp
= MAKE_TCGV_I64(args
[i
]);
711 tcg_temp_free_i64(temp
);
714 #endif /* TCG_TARGET_EXTEND_ARGS */
717 #if TCG_TARGET_REG_BITS == 32
718 void tcg_gen_shifti_i64(TCGv_i64 ret
, TCGv_i64 arg1
,
719 int c
, int right
, int arith
)
722 tcg_gen_mov_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
));
723 tcg_gen_mov_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
));
724 } else if (c
>= 32) {
728 tcg_gen_sari_i32(TCGV_LOW(ret
), TCGV_HIGH(arg1
), c
);
729 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), 31);
731 tcg_gen_shri_i32(TCGV_LOW(ret
), TCGV_HIGH(arg1
), c
);
732 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
735 tcg_gen_shli_i32(TCGV_HIGH(ret
), TCGV_LOW(arg1
), c
);
736 tcg_gen_movi_i32(TCGV_LOW(ret
), 0);
741 t0
= tcg_temp_new_i32();
742 t1
= tcg_temp_new_i32();
744 tcg_gen_shli_i32(t0
, TCGV_HIGH(arg1
), 32 - c
);
746 tcg_gen_sari_i32(t1
, TCGV_HIGH(arg1
), c
);
748 tcg_gen_shri_i32(t1
, TCGV_HIGH(arg1
), c
);
749 tcg_gen_shri_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), c
);
750 tcg_gen_or_i32(TCGV_LOW(ret
), TCGV_LOW(ret
), t0
);
751 tcg_gen_mov_i32(TCGV_HIGH(ret
), t1
);
753 tcg_gen_shri_i32(t0
, TCGV_LOW(arg1
), 32 - c
);
754 /* Note: ret can be the same as arg1, so we use t1 */
755 tcg_gen_shli_i32(t1
, TCGV_LOW(arg1
), c
);
756 tcg_gen_shli_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), c
);
757 tcg_gen_or_i32(TCGV_HIGH(ret
), TCGV_HIGH(ret
), t0
);
758 tcg_gen_mov_i32(TCGV_LOW(ret
), t1
);
760 tcg_temp_free_i32(t0
);
761 tcg_temp_free_i32(t1
);
767 static void tcg_reg_alloc_start(TCGContext
*s
)
771 for(i
= 0; i
< s
->nb_globals
; i
++) {
774 ts
->val_type
= TEMP_VAL_REG
;
776 ts
->val_type
= TEMP_VAL_MEM
;
779 for(i
= s
->nb_globals
; i
< s
->nb_temps
; i
++) {
781 ts
->val_type
= TEMP_VAL_DEAD
;
782 ts
->mem_allocated
= 0;
785 for(i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
786 s
->reg_to_temp
[i
] = -1;
790 static char *tcg_get_arg_str_idx(TCGContext
*s
, char *buf
, int buf_size
,
795 assert(idx
>= 0 && idx
< s
->nb_temps
);
798 if (idx
< s
->nb_globals
) {
799 pstrcpy(buf
, buf_size
, ts
->name
);
802 snprintf(buf
, buf_size
, "loc%d", idx
- s
->nb_globals
);
804 snprintf(buf
, buf_size
, "tmp%d", idx
- s
->nb_globals
);
809 char *tcg_get_arg_str_i32(TCGContext
*s
, char *buf
, int buf_size
, TCGv_i32 arg
)
811 return tcg_get_arg_str_idx(s
, buf
, buf_size
, GET_TCGV_I32(arg
));
814 char *tcg_get_arg_str_i64(TCGContext
*s
, char *buf
, int buf_size
, TCGv_i64 arg
)
816 return tcg_get_arg_str_idx(s
, buf
, buf_size
, GET_TCGV_I64(arg
));
819 static int helper_cmp(const void *p1
, const void *p2
)
821 const TCGHelperInfo
*th1
= p1
;
822 const TCGHelperInfo
*th2
= p2
;
823 if (th1
->func
< th2
->func
)
825 else if (th1
->func
== th2
->func
)
831 /* find helper definition (Note: A hash table would be better) */
832 static TCGHelperInfo
*tcg_find_helper(TCGContext
*s
, tcg_target_ulong val
)
838 if (unlikely(!s
->helpers_sorted
)) {
839 qsort(s
->helpers
, s
->nb_helpers
, sizeof(TCGHelperInfo
),
841 s
->helpers_sorted
= 1;
846 m_max
= s
->nb_helpers
- 1;
847 while (m_min
<= m_max
) {
848 m
= (m_min
+ m_max
) >> 1;
862 static const char * const cond_name
[] =
864 [TCG_COND_EQ
] = "eq",
865 [TCG_COND_NE
] = "ne",
866 [TCG_COND_LT
] = "lt",
867 [TCG_COND_GE
] = "ge",
868 [TCG_COND_LE
] = "le",
869 [TCG_COND_GT
] = "gt",
870 [TCG_COND_LTU
] = "ltu",
871 [TCG_COND_GEU
] = "geu",
872 [TCG_COND_LEU
] = "leu",
873 [TCG_COND_GTU
] = "gtu"
876 void tcg_dump_ops(TCGContext
*s
)
878 const uint16_t *opc_ptr
;
882 int i
, k
, nb_oargs
, nb_iargs
, nb_cargs
, first_insn
;
887 opc_ptr
= gen_opc_buf
;
888 args
= gen_opparam_buf
;
889 while (opc_ptr
< gen_opc_ptr
) {
891 def
= &tcg_op_defs
[c
];
892 if (c
== INDEX_op_debug_insn_start
) {
894 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
895 pc
= ((uint64_t)args
[1] << 32) | args
[0];
902 qemu_log(" ---- 0x%" PRIx64
, pc
);
904 nb_oargs
= def
->nb_oargs
;
905 nb_iargs
= def
->nb_iargs
;
906 nb_cargs
= def
->nb_cargs
;
907 } else if (c
== INDEX_op_call
) {
910 /* variable number of arguments */
912 nb_oargs
= arg
>> 16;
913 nb_iargs
= arg
& 0xffff;
914 nb_cargs
= def
->nb_cargs
;
916 qemu_log(" %s ", def
->name
);
920 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
921 args
[nb_oargs
+ nb_iargs
- 1]));
923 qemu_log(",$0x%" TCG_PRIlx
, args
[nb_oargs
+ nb_iargs
]);
925 qemu_log(",$%d", nb_oargs
);
926 for(i
= 0; i
< nb_oargs
; i
++) {
928 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
931 for(i
= 0; i
< (nb_iargs
- 1); i
++) {
933 if (args
[nb_oargs
+ i
] == TCG_CALL_DUMMY_ARG
) {
936 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
937 args
[nb_oargs
+ i
]));
940 } else if (c
== INDEX_op_movi_i32
941 #if TCG_TARGET_REG_BITS == 64
942 || c
== INDEX_op_movi_i64
945 tcg_target_ulong val
;
948 nb_oargs
= def
->nb_oargs
;
949 nb_iargs
= def
->nb_iargs
;
950 nb_cargs
= def
->nb_cargs
;
951 qemu_log(" %s %s,$", def
->name
,
952 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), args
[0]));
954 th
= tcg_find_helper(s
, val
);
956 qemu_log("%s", th
->name
);
958 if (c
== INDEX_op_movi_i32
) {
959 qemu_log("0x%x", (uint32_t)val
);
961 qemu_log("0x%" PRIx64
, (uint64_t)val
);
965 qemu_log(" %s ", def
->name
);
966 if (c
== INDEX_op_nopn
) {
967 /* variable number of arguments */
972 nb_oargs
= def
->nb_oargs
;
973 nb_iargs
= def
->nb_iargs
;
974 nb_cargs
= def
->nb_cargs
;
978 for(i
= 0; i
< nb_oargs
; i
++) {
982 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
985 for(i
= 0; i
< nb_iargs
; i
++) {
989 qemu_log("%s", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
),
993 case INDEX_op_brcond_i32
:
994 #if TCG_TARGET_REG_BITS == 32
995 case INDEX_op_brcond2_i32
:
996 #elif TCG_TARGET_REG_BITS == 64
997 case INDEX_op_brcond_i64
:
999 case INDEX_op_setcond_i32
:
1000 #if TCG_TARGET_REG_BITS == 32
1001 case INDEX_op_setcond2_i32
:
1002 #elif TCG_TARGET_REG_BITS == 64
1003 case INDEX_op_setcond_i64
:
1005 if (args
[k
] < ARRAY_SIZE(cond_name
) && cond_name
[args
[k
]]) {
1006 qemu_log(",%s", cond_name
[args
[k
++]]);
1008 qemu_log(",$0x%" TCG_PRIlx
, args
[k
++]);
1016 for(; i
< nb_cargs
; i
++) {
1021 qemu_log("$0x%" TCG_PRIlx
, arg
);
1025 args
+= nb_iargs
+ nb_oargs
+ nb_cargs
;
1029 /* we give more priority to constraints with less registers */
1030 static int get_constraint_priority(const TCGOpDef
*def
, int k
)
1032 const TCGArgConstraint
*arg_ct
;
1035 arg_ct
= &def
->args_ct
[k
];
1036 if (arg_ct
->ct
& TCG_CT_ALIAS
) {
1037 /* an alias is equivalent to a single register */
1040 if (!(arg_ct
->ct
& TCG_CT_REG
))
1043 for(i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
1044 if (tcg_regset_test_reg(arg_ct
->u
.regs
, i
))
1048 return TCG_TARGET_NB_REGS
- n
+ 1;
1051 /* sort from highest priority to lowest */
1052 static void sort_constraints(TCGOpDef
*def
, int start
, int n
)
1054 int i
, j
, p1
, p2
, tmp
;
1056 for(i
= 0; i
< n
; i
++)
1057 def
->sorted_args
[start
+ i
] = start
+ i
;
1060 for(i
= 0; i
< n
- 1; i
++) {
1061 for(j
= i
+ 1; j
< n
; j
++) {
1062 p1
= get_constraint_priority(def
, def
->sorted_args
[start
+ i
]);
1063 p2
= get_constraint_priority(def
, def
->sorted_args
[start
+ j
]);
1065 tmp
= def
->sorted_args
[start
+ i
];
1066 def
->sorted_args
[start
+ i
] = def
->sorted_args
[start
+ j
];
1067 def
->sorted_args
[start
+ j
] = tmp
;
1073 void tcg_add_target_add_op_defs(const TCGTargetOpDef
*tdefs
)
1081 if (tdefs
->op
== (TCGOpcode
)-1)
1084 assert((unsigned)op
< NB_OPS
);
1085 def
= &tcg_op_defs
[op
];
1086 #if defined(CONFIG_DEBUG_TCG)
1087 /* Duplicate entry in op definitions? */
1091 nb_args
= def
->nb_iargs
+ def
->nb_oargs
;
1092 for(i
= 0; i
< nb_args
; i
++) {
1093 ct_str
= tdefs
->args_ct_str
[i
];
1094 /* Incomplete TCGTargetOpDef entry? */
1095 assert(ct_str
!= NULL
);
1096 tcg_regset_clear(def
->args_ct
[i
].u
.regs
);
1097 def
->args_ct
[i
].ct
= 0;
1098 if (ct_str
[0] >= '0' && ct_str
[0] <= '9') {
1100 oarg
= ct_str
[0] - '0';
1101 assert(oarg
< def
->nb_oargs
);
1102 assert(def
->args_ct
[oarg
].ct
& TCG_CT_REG
);
1103 /* TCG_CT_ALIAS is for the output arguments. The input
1104 argument is tagged with TCG_CT_IALIAS. */
1105 def
->args_ct
[i
] = def
->args_ct
[oarg
];
1106 def
->args_ct
[oarg
].ct
= TCG_CT_ALIAS
;
1107 def
->args_ct
[oarg
].alias_index
= i
;
1108 def
->args_ct
[i
].ct
|= TCG_CT_IALIAS
;
1109 def
->args_ct
[i
].alias_index
= oarg
;
1112 if (*ct_str
== '\0')
1116 def
->args_ct
[i
].ct
|= TCG_CT_CONST
;
1120 if (target_parse_constraint(&def
->args_ct
[i
], &ct_str
) < 0) {
1121 fprintf(stderr
, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1122 ct_str
, i
, def
->name
);
1130 /* TCGTargetOpDef entry with too much information? */
1131 assert(i
== TCG_MAX_OP_ARGS
|| tdefs
->args_ct_str
[i
] == NULL
);
1133 /* sort the constraints (XXX: this is just a heuristic) */
1134 sort_constraints(def
, 0, def
->nb_oargs
);
1135 sort_constraints(def
, def
->nb_oargs
, def
->nb_iargs
);
1141 printf("%s: sorted=", def
->name
);
1142 for(i
= 0; i
< def
->nb_oargs
+ def
->nb_iargs
; i
++)
1143 printf(" %d", def
->sorted_args
[i
]);
1150 #if defined(CONFIG_DEBUG_TCG)
1152 for (op
= 0; op
< ARRAY_SIZE(tcg_op_defs
); op
++) {
1153 const TCGOpDef
*def
= &tcg_op_defs
[op
];
1154 if (op
< INDEX_op_call
1155 || op
== INDEX_op_debug_insn_start
1156 || (def
->flags
& TCG_OPF_NOT_PRESENT
)) {
1157 /* Wrong entry in op definitions? */
1159 fprintf(stderr
, "Invalid op definition for %s\n", def
->name
);
1163 /* Missing entry in op definitions? */
1165 fprintf(stderr
, "Missing op definition for %s\n", def
->name
);
1176 #ifdef USE_LIVENESS_ANALYSIS
1178 /* set a nop for an operation using 'nb_args' */
1179 static inline void tcg_set_nop(TCGContext
*s
, uint16_t *opc_ptr
,
1180 TCGArg
*args
, int nb_args
)
1183 *opc_ptr
= INDEX_op_nop
;
1185 *opc_ptr
= INDEX_op_nopn
;
1187 args
[nb_args
- 1] = nb_args
;
1191 /* liveness analysis: end of function: globals are live, temps are
1193 /* XXX: at this stage, not used as there would be little gains because
1194 most TBs end with a conditional jump. */
1195 static inline void tcg_la_func_end(TCGContext
*s
, uint8_t *dead_temps
)
1197 memset(dead_temps
, 0, s
->nb_globals
);
1198 memset(dead_temps
+ s
->nb_globals
, 1, s
->nb_temps
- s
->nb_globals
);
1201 /* liveness analysis: end of basic block: globals are live, temps are
1202 dead, local temps are live. */
1203 static inline void tcg_la_bb_end(TCGContext
*s
, uint8_t *dead_temps
)
1208 memset(dead_temps
, 0, s
->nb_globals
);
1209 ts
= &s
->temps
[s
->nb_globals
];
1210 for(i
= s
->nb_globals
; i
< s
->nb_temps
; i
++) {
1219 /* Liveness analysis : update the opc_dead_args array to tell if a
1220 given input arguments is dead. Instructions updating dead
1221 temporaries are removed. */
1222 static void tcg_liveness_analysis(TCGContext
*s
)
1224 int i
, op_index
, nb_args
, nb_iargs
, nb_oargs
, arg
, nb_ops
;
1227 const TCGOpDef
*def
;
1228 uint8_t *dead_temps
;
1229 unsigned int dead_args
;
1231 gen_opc_ptr
++; /* skip end */
1233 nb_ops
= gen_opc_ptr
- gen_opc_buf
;
1235 s
->op_dead_args
= tcg_malloc(nb_ops
* sizeof(uint16_t));
1237 dead_temps
= tcg_malloc(s
->nb_temps
);
1238 memset(dead_temps
, 1, s
->nb_temps
);
1240 args
= gen_opparam_ptr
;
1241 op_index
= nb_ops
- 1;
1242 while (op_index
>= 0) {
1243 op
= gen_opc_buf
[op_index
];
1244 def
= &tcg_op_defs
[op
];
1252 nb_iargs
= args
[0] & 0xffff;
1253 nb_oargs
= args
[0] >> 16;
1255 call_flags
= args
[nb_oargs
+ nb_iargs
];
1257 /* pure functions can be removed if their result is not
1259 if (call_flags
& TCG_CALL_PURE
) {
1260 for(i
= 0; i
< nb_oargs
; i
++) {
1262 if (!dead_temps
[arg
])
1263 goto do_not_remove_call
;
1265 tcg_set_nop(s
, gen_opc_buf
+ op_index
,
1270 /* output args are dead */
1272 for(i
= 0; i
< nb_oargs
; i
++) {
1274 if (dead_temps
[arg
]) {
1275 dead_args
|= (1 << i
);
1277 dead_temps
[arg
] = 1;
1280 if (!(call_flags
& TCG_CALL_CONST
)) {
1281 /* globals are live (they may be used by the call) */
1282 memset(dead_temps
, 0, s
->nb_globals
);
1285 /* input args are live */
1286 for(i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
1288 if (arg
!= TCG_CALL_DUMMY_ARG
) {
1289 if (dead_temps
[arg
]) {
1290 dead_args
|= (1 << i
);
1292 dead_temps
[arg
] = 0;
1295 s
->op_dead_args
[op_index
] = dead_args
;
1300 case INDEX_op_set_label
:
1302 /* mark end of basic block */
1303 tcg_la_bb_end(s
, dead_temps
);
1305 case INDEX_op_debug_insn_start
:
1306 args
-= def
->nb_args
;
1312 case INDEX_op_discard
:
1314 /* mark the temporary as dead */
1315 dead_temps
[args
[0]] = 1;
1319 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1321 args
-= def
->nb_args
;
1322 nb_iargs
= def
->nb_iargs
;
1323 nb_oargs
= def
->nb_oargs
;
1325 /* Test if the operation can be removed because all
1326 its outputs are dead. We assume that nb_oargs == 0
1327 implies side effects */
1328 if (!(def
->flags
& TCG_OPF_SIDE_EFFECTS
) && nb_oargs
!= 0) {
1329 for(i
= 0; i
< nb_oargs
; i
++) {
1331 if (!dead_temps
[arg
])
1334 tcg_set_nop(s
, gen_opc_buf
+ op_index
, args
, def
->nb_args
);
1335 #ifdef CONFIG_PROFILER
1341 /* output args are dead */
1343 for(i
= 0; i
< nb_oargs
; i
++) {
1345 if (dead_temps
[arg
]) {
1346 dead_args
|= (1 << i
);
1348 dead_temps
[arg
] = 1;
1351 /* if end of basic block, update */
1352 if (def
->flags
& TCG_OPF_BB_END
) {
1353 tcg_la_bb_end(s
, dead_temps
);
1354 } else if (def
->flags
& TCG_OPF_CALL_CLOBBER
) {
1355 /* globals are live */
1356 memset(dead_temps
, 0, s
->nb_globals
);
1359 /* input args are live */
1360 for(i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
1362 if (dead_temps
[arg
]) {
1363 dead_args
|= (1 << i
);
1365 dead_temps
[arg
] = 0;
1367 s
->op_dead_args
[op_index
] = dead_args
;
1374 if (args
!= gen_opparam_buf
)
1378 /* dummy liveness analysis */
1379 static void tcg_liveness_analysis(TCGContext
*s
)
1382 nb_ops
= gen_opc_ptr
- gen_opc_buf
;
1384 s
->op_dead_args
= tcg_malloc(nb_ops
* sizeof(uint16_t));
1385 memset(s
->op_dead_args
, 0, nb_ops
* sizeof(uint16_t));
1390 static void dump_regs(TCGContext
*s
)
1396 for(i
= 0; i
< s
->nb_temps
; i
++) {
1398 printf(" %10s: ", tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), i
));
1399 switch(ts
->val_type
) {
1401 printf("%s", tcg_target_reg_names
[ts
->reg
]);
1404 printf("%d(%s)", (int)ts
->mem_offset
, tcg_target_reg_names
[ts
->mem_reg
]);
1406 case TEMP_VAL_CONST
:
1407 printf("$0x%" TCG_PRIlx
, ts
->val
);
1419 for(i
= 0; i
< TCG_TARGET_NB_REGS
; i
++) {
1420 if (s
->reg_to_temp
[i
] >= 0) {
1422 tcg_target_reg_names
[i
],
1423 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), s
->reg_to_temp
[i
]));
1428 static void check_regs(TCGContext
*s
)
1434 for(reg
= 0; reg
< TCG_TARGET_NB_REGS
; reg
++) {
1435 k
= s
->reg_to_temp
[reg
];
1438 if (ts
->val_type
!= TEMP_VAL_REG
||
1440 printf("Inconsistency for register %s:\n",
1441 tcg_target_reg_names
[reg
]);
1446 for(k
= 0; k
< s
->nb_temps
; k
++) {
1448 if (ts
->val_type
== TEMP_VAL_REG
&&
1450 s
->reg_to_temp
[ts
->reg
] != k
) {
1451 printf("Inconsistency for temp %s:\n",
1452 tcg_get_arg_str_idx(s
, buf
, sizeof(buf
), k
));
1454 printf("reg state:\n");
1462 static void temp_allocate_frame(TCGContext
*s
, int temp
)
1465 ts
= &s
->temps
[temp
];
1466 #ifndef __sparc_v9__ /* Sparc64 stack is accessed with offset of 2047 */
1467 s
->current_frame_offset
= (s
->current_frame_offset
+
1468 (tcg_target_long
)sizeof(tcg_target_long
) - 1) &
1469 ~(sizeof(tcg_target_long
) - 1);
1471 if (s
->current_frame_offset
+ (tcg_target_long
)sizeof(tcg_target_long
) >
1475 ts
->mem_offset
= s
->current_frame_offset
;
1476 ts
->mem_reg
= s
->frame_reg
;
1477 ts
->mem_allocated
= 1;
1478 s
->current_frame_offset
+= (tcg_target_long
)sizeof(tcg_target_long
);
1481 /* free register 'reg' by spilling the corresponding temporary if necessary */
1482 static void tcg_reg_free(TCGContext
*s
, int reg
)
1487 temp
= s
->reg_to_temp
[reg
];
1489 ts
= &s
->temps
[temp
];
1490 assert(ts
->val_type
== TEMP_VAL_REG
);
1491 if (!ts
->mem_coherent
) {
1492 if (!ts
->mem_allocated
)
1493 temp_allocate_frame(s
, temp
);
1494 tcg_out_st(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1496 ts
->val_type
= TEMP_VAL_MEM
;
1497 s
->reg_to_temp
[reg
] = -1;
1501 /* Allocate a register belonging to reg1 & ~reg2 */
1502 static int tcg_reg_alloc(TCGContext
*s
, TCGRegSet reg1
, TCGRegSet reg2
)
1507 tcg_regset_andnot(reg_ct
, reg1
, reg2
);
1509 /* first try free registers */
1510 for(i
= 0; i
< ARRAY_SIZE(tcg_target_reg_alloc_order
); i
++) {
1511 reg
= tcg_target_reg_alloc_order
[i
];
1512 if (tcg_regset_test_reg(reg_ct
, reg
) && s
->reg_to_temp
[reg
] == -1)
1516 /* XXX: do better spill choice */
1517 for(i
= 0; i
< ARRAY_SIZE(tcg_target_reg_alloc_order
); i
++) {
1518 reg
= tcg_target_reg_alloc_order
[i
];
1519 if (tcg_regset_test_reg(reg_ct
, reg
)) {
1520 tcg_reg_free(s
, reg
);
1528 /* save a temporary to memory. 'allocated_regs' is used in case a
1529 temporary register needs to be allocated to store a constant. */
1530 static void temp_save(TCGContext
*s
, int temp
, TCGRegSet allocated_regs
)
1535 ts
= &s
->temps
[temp
];
1536 if (!ts
->fixed_reg
) {
1537 switch(ts
->val_type
) {
1539 tcg_reg_free(s
, ts
->reg
);
1542 ts
->val_type
= TEMP_VAL_MEM
;
1544 case TEMP_VAL_CONST
:
1545 reg
= tcg_reg_alloc(s
, tcg_target_available_regs
[ts
->type
],
1547 if (!ts
->mem_allocated
)
1548 temp_allocate_frame(s
, temp
);
1549 tcg_out_movi(s
, ts
->type
, reg
, ts
->val
);
1550 tcg_out_st(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1551 ts
->val_type
= TEMP_VAL_MEM
;
1561 /* save globals to their canonical location and assume they can be
1562 modified be the following code. 'allocated_regs' is used in case a
1563 temporary registers needs to be allocated to store a constant. */
1564 static void save_globals(TCGContext
*s
, TCGRegSet allocated_regs
)
1568 for(i
= 0; i
< s
->nb_globals
; i
++) {
1569 temp_save(s
, i
, allocated_regs
);
1573 /* at the end of a basic block, we assume all temporaries are dead and
1574 all globals are stored at their canonical location. */
1575 static void tcg_reg_alloc_bb_end(TCGContext
*s
, TCGRegSet allocated_regs
)
1580 for(i
= s
->nb_globals
; i
< s
->nb_temps
; i
++) {
1582 if (ts
->temp_local
) {
1583 temp_save(s
, i
, allocated_regs
);
1585 if (ts
->val_type
== TEMP_VAL_REG
) {
1586 s
->reg_to_temp
[ts
->reg
] = -1;
1588 ts
->val_type
= TEMP_VAL_DEAD
;
1592 save_globals(s
, allocated_regs
);
1595 #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1597 static void tcg_reg_alloc_movi(TCGContext
*s
, const TCGArg
*args
)
1600 tcg_target_ulong val
;
1602 ots
= &s
->temps
[args
[0]];
1605 if (ots
->fixed_reg
) {
1606 /* for fixed registers, we do not do any constant
1608 tcg_out_movi(s
, ots
->type
, ots
->reg
, val
);
1610 /* The movi is not explicitly generated here */
1611 if (ots
->val_type
== TEMP_VAL_REG
)
1612 s
->reg_to_temp
[ots
->reg
] = -1;
1613 ots
->val_type
= TEMP_VAL_CONST
;
1618 static void tcg_reg_alloc_mov(TCGContext
*s
, const TCGOpDef
*def
,
1620 unsigned int dead_args
)
1624 const TCGArgConstraint
*arg_ct
;
1626 ots
= &s
->temps
[args
[0]];
1627 ts
= &s
->temps
[args
[1]];
1628 arg_ct
= &def
->args_ct
[0];
1630 /* XXX: always mark arg dead if IS_DEAD_ARG(1) */
1631 if (ts
->val_type
== TEMP_VAL_REG
) {
1632 if (IS_DEAD_ARG(1) && !ts
->fixed_reg
&& !ots
->fixed_reg
) {
1633 /* the mov can be suppressed */
1634 if (ots
->val_type
== TEMP_VAL_REG
)
1635 s
->reg_to_temp
[ots
->reg
] = -1;
1637 s
->reg_to_temp
[reg
] = -1;
1638 ts
->val_type
= TEMP_VAL_DEAD
;
1640 if (ots
->val_type
== TEMP_VAL_REG
) {
1643 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, s
->reserved_regs
);
1645 if (ts
->reg
!= reg
) {
1646 tcg_out_mov(s
, ots
->type
, reg
, ts
->reg
);
1649 } else if (ts
->val_type
== TEMP_VAL_MEM
) {
1650 if (ots
->val_type
== TEMP_VAL_REG
) {
1653 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, s
->reserved_regs
);
1655 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1656 } else if (ts
->val_type
== TEMP_VAL_CONST
) {
1657 if (ots
->fixed_reg
) {
1659 tcg_out_movi(s
, ots
->type
, reg
, ts
->val
);
1661 /* propagate constant */
1662 if (ots
->val_type
== TEMP_VAL_REG
)
1663 s
->reg_to_temp
[ots
->reg
] = -1;
1664 ots
->val_type
= TEMP_VAL_CONST
;
1671 s
->reg_to_temp
[reg
] = args
[0];
1673 ots
->val_type
= TEMP_VAL_REG
;
1674 ots
->mem_coherent
= 0;
1677 static void tcg_reg_alloc_op(TCGContext
*s
,
1678 const TCGOpDef
*def
, TCGOpcode opc
,
1680 unsigned int dead_args
)
1682 TCGRegSet allocated_regs
;
1683 int i
, k
, nb_iargs
, nb_oargs
, reg
;
1685 const TCGArgConstraint
*arg_ct
;
1687 TCGArg new_args
[TCG_MAX_OP_ARGS
];
1688 int const_args
[TCG_MAX_OP_ARGS
];
1690 nb_oargs
= def
->nb_oargs
;
1691 nb_iargs
= def
->nb_iargs
;
1693 /* copy constants */
1694 memcpy(new_args
+ nb_oargs
+ nb_iargs
,
1695 args
+ nb_oargs
+ nb_iargs
,
1696 sizeof(TCGArg
) * def
->nb_cargs
);
1698 /* satisfy input constraints */
1699 tcg_regset_set(allocated_regs
, s
->reserved_regs
);
1700 for(k
= 0; k
< nb_iargs
; k
++) {
1701 i
= def
->sorted_args
[nb_oargs
+ k
];
1703 arg_ct
= &def
->args_ct
[i
];
1704 ts
= &s
->temps
[arg
];
1705 if (ts
->val_type
== TEMP_VAL_MEM
) {
1706 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1707 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1708 ts
->val_type
= TEMP_VAL_REG
;
1710 ts
->mem_coherent
= 1;
1711 s
->reg_to_temp
[reg
] = arg
;
1712 } else if (ts
->val_type
== TEMP_VAL_CONST
) {
1713 if (tcg_target_const_match(ts
->val
, arg_ct
)) {
1714 /* constant is OK for instruction */
1716 new_args
[i
] = ts
->val
;
1719 /* need to move to a register */
1720 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1721 tcg_out_movi(s
, ts
->type
, reg
, ts
->val
);
1722 ts
->val_type
= TEMP_VAL_REG
;
1724 ts
->mem_coherent
= 0;
1725 s
->reg_to_temp
[reg
] = arg
;
1728 assert(ts
->val_type
== TEMP_VAL_REG
);
1729 if (arg_ct
->ct
& TCG_CT_IALIAS
) {
1730 if (ts
->fixed_reg
) {
1731 /* if fixed register, we must allocate a new register
1732 if the alias is not the same register */
1733 if (arg
!= args
[arg_ct
->alias_index
])
1734 goto allocate_in_reg
;
1736 /* if the input is aliased to an output and if it is
1737 not dead after the instruction, we must allocate
1738 a new register and move it */
1739 if (!IS_DEAD_ARG(i
)) {
1740 goto allocate_in_reg
;
1745 if (tcg_regset_test_reg(arg_ct
->u
.regs
, reg
)) {
1746 /* nothing to do : the constraint is satisfied */
1749 /* allocate a new register matching the constraint
1750 and move the temporary register into it */
1751 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1752 tcg_out_mov(s
, ts
->type
, reg
, ts
->reg
);
1756 tcg_regset_set_reg(allocated_regs
, reg
);
1760 if (def
->flags
& TCG_OPF_BB_END
) {
1761 tcg_reg_alloc_bb_end(s
, allocated_regs
);
1763 /* mark dead temporaries and free the associated registers */
1764 for(i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
1766 if (IS_DEAD_ARG(i
)) {
1767 ts
= &s
->temps
[arg
];
1768 if (!ts
->fixed_reg
) {
1769 if (ts
->val_type
== TEMP_VAL_REG
)
1770 s
->reg_to_temp
[ts
->reg
] = -1;
1771 ts
->val_type
= TEMP_VAL_DEAD
;
1776 if (def
->flags
& TCG_OPF_CALL_CLOBBER
) {
1777 /* XXX: permit generic clobber register list ? */
1778 for(reg
= 0; reg
< TCG_TARGET_NB_REGS
; reg
++) {
1779 if (tcg_regset_test_reg(tcg_target_call_clobber_regs
, reg
)) {
1780 tcg_reg_free(s
, reg
);
1783 /* XXX: for load/store we could do that only for the slow path
1784 (i.e. when a memory callback is called) */
1786 /* store globals and free associated registers (we assume the insn
1787 can modify any global. */
1788 save_globals(s
, allocated_regs
);
1791 /* satisfy the output constraints */
1792 tcg_regset_set(allocated_regs
, s
->reserved_regs
);
1793 for(k
= 0; k
< nb_oargs
; k
++) {
1794 i
= def
->sorted_args
[k
];
1796 arg_ct
= &def
->args_ct
[i
];
1797 ts
= &s
->temps
[arg
];
1798 if (arg_ct
->ct
& TCG_CT_ALIAS
) {
1799 reg
= new_args
[arg_ct
->alias_index
];
1801 /* if fixed register, we try to use it */
1803 if (ts
->fixed_reg
&&
1804 tcg_regset_test_reg(arg_ct
->u
.regs
, reg
)) {
1807 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1809 tcg_regset_set_reg(allocated_regs
, reg
);
1810 /* if a fixed register is used, then a move will be done afterwards */
1811 if (!ts
->fixed_reg
) {
1812 if (ts
->val_type
== TEMP_VAL_REG
)
1813 s
->reg_to_temp
[ts
->reg
] = -1;
1814 if (IS_DEAD_ARG(i
)) {
1815 ts
->val_type
= TEMP_VAL_DEAD
;
1817 ts
->val_type
= TEMP_VAL_REG
;
1819 /* temp value is modified, so the value kept in memory is
1820 potentially not the same */
1821 ts
->mem_coherent
= 0;
1822 s
->reg_to_temp
[reg
] = arg
;
1830 /* emit instruction */
1831 tcg_out_op(s
, opc
, new_args
, const_args
);
1833 /* move the outputs in the correct register if needed */
1834 for(i
= 0; i
< nb_oargs
; i
++) {
1835 ts
= &s
->temps
[args
[i
]];
1837 if (ts
->fixed_reg
&& ts
->reg
!= reg
) {
1838 tcg_out_mov(s
, ts
->type
, ts
->reg
, reg
);
1843 #ifdef TCG_TARGET_STACK_GROWSUP
1844 #define STACK_DIR(x) (-(x))
1846 #define STACK_DIR(x) (x)
1849 static int tcg_reg_alloc_call(TCGContext
*s
, const TCGOpDef
*def
,
1850 TCGOpcode opc
, const TCGArg
*args
,
1851 unsigned int dead_args
)
1853 int nb_iargs
, nb_oargs
, flags
, nb_regs
, i
, reg
, nb_params
;
1854 TCGArg arg
, func_arg
;
1856 tcg_target_long stack_offset
, call_stack_size
, func_addr
;
1857 int const_func_arg
, allocate_args
;
1858 TCGRegSet allocated_regs
;
1859 const TCGArgConstraint
*arg_ct
;
1863 nb_oargs
= arg
>> 16;
1864 nb_iargs
= arg
& 0xffff;
1865 nb_params
= nb_iargs
- 1;
1867 flags
= args
[nb_oargs
+ nb_iargs
];
1869 nb_regs
= tcg_target_get_call_iarg_regs_count(flags
);
1870 if (nb_regs
> nb_params
)
1871 nb_regs
= nb_params
;
1873 /* assign stack slots first */
1874 call_stack_size
= (nb_params
- nb_regs
) * sizeof(tcg_target_long
);
1875 call_stack_size
= (call_stack_size
+ TCG_TARGET_STACK_ALIGN
- 1) &
1876 ~(TCG_TARGET_STACK_ALIGN
- 1);
1877 allocate_args
= (call_stack_size
> TCG_STATIC_CALL_ARGS_SIZE
);
1878 if (allocate_args
) {
1879 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
1880 preallocate call stack */
1884 stack_offset
= TCG_TARGET_CALL_STACK_OFFSET
;
1885 for(i
= nb_regs
; i
< nb_params
; i
++) {
1886 arg
= args
[nb_oargs
+ i
];
1887 #ifdef TCG_TARGET_STACK_GROWSUP
1888 stack_offset
-= sizeof(tcg_target_long
);
1890 if (arg
!= TCG_CALL_DUMMY_ARG
) {
1891 ts
= &s
->temps
[arg
];
1892 if (ts
->val_type
== TEMP_VAL_REG
) {
1893 tcg_out_st(s
, ts
->type
, ts
->reg
, TCG_REG_CALL_STACK
, stack_offset
);
1894 } else if (ts
->val_type
== TEMP_VAL_MEM
) {
1895 reg
= tcg_reg_alloc(s
, tcg_target_available_regs
[ts
->type
],
1897 /* XXX: not correct if reading values from the stack */
1898 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1899 tcg_out_st(s
, ts
->type
, reg
, TCG_REG_CALL_STACK
, stack_offset
);
1900 } else if (ts
->val_type
== TEMP_VAL_CONST
) {
1901 reg
= tcg_reg_alloc(s
, tcg_target_available_regs
[ts
->type
],
1903 /* XXX: sign extend may be needed on some targets */
1904 tcg_out_movi(s
, ts
->type
, reg
, ts
->val
);
1905 tcg_out_st(s
, ts
->type
, reg
, TCG_REG_CALL_STACK
, stack_offset
);
1910 #ifndef TCG_TARGET_STACK_GROWSUP
1911 stack_offset
+= sizeof(tcg_target_long
);
1915 /* assign input registers */
1916 tcg_regset_set(allocated_regs
, s
->reserved_regs
);
1917 for(i
= 0; i
< nb_regs
; i
++) {
1918 arg
= args
[nb_oargs
+ i
];
1919 if (arg
!= TCG_CALL_DUMMY_ARG
) {
1920 ts
= &s
->temps
[arg
];
1921 reg
= tcg_target_call_iarg_regs
[i
];
1922 tcg_reg_free(s
, reg
);
1923 if (ts
->val_type
== TEMP_VAL_REG
) {
1924 if (ts
->reg
!= reg
) {
1925 tcg_out_mov(s
, ts
->type
, reg
, ts
->reg
);
1927 } else if (ts
->val_type
== TEMP_VAL_MEM
) {
1928 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1929 } else if (ts
->val_type
== TEMP_VAL_CONST
) {
1930 /* XXX: sign extend ? */
1931 tcg_out_movi(s
, ts
->type
, reg
, ts
->val
);
1935 tcg_regset_set_reg(allocated_regs
, reg
);
1939 /* assign function address */
1940 func_arg
= args
[nb_oargs
+ nb_iargs
- 1];
1941 arg_ct
= &def
->args_ct
[0];
1942 ts
= &s
->temps
[func_arg
];
1943 func_addr
= ts
->val
;
1945 if (ts
->val_type
== TEMP_VAL_MEM
) {
1946 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1947 tcg_out_ld(s
, ts
->type
, reg
, ts
->mem_reg
, ts
->mem_offset
);
1949 tcg_regset_set_reg(allocated_regs
, reg
);
1950 } else if (ts
->val_type
== TEMP_VAL_REG
) {
1952 if (!tcg_regset_test_reg(arg_ct
->u
.regs
, reg
)) {
1953 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1954 tcg_out_mov(s
, ts
->type
, reg
, ts
->reg
);
1957 tcg_regset_set_reg(allocated_regs
, reg
);
1958 } else if (ts
->val_type
== TEMP_VAL_CONST
) {
1959 if (tcg_target_const_match(func_addr
, arg_ct
)) {
1961 func_arg
= func_addr
;
1963 reg
= tcg_reg_alloc(s
, arg_ct
->u
.regs
, allocated_regs
);
1964 tcg_out_movi(s
, ts
->type
, reg
, func_addr
);
1966 tcg_regset_set_reg(allocated_regs
, reg
);
1973 /* mark dead temporaries and free the associated registers */
1974 for(i
= nb_oargs
; i
< nb_iargs
+ nb_oargs
; i
++) {
1976 if (IS_DEAD_ARG(i
)) {
1977 ts
= &s
->temps
[arg
];
1978 if (!ts
->fixed_reg
) {
1979 if (ts
->val_type
== TEMP_VAL_REG
)
1980 s
->reg_to_temp
[ts
->reg
] = -1;
1981 ts
->val_type
= TEMP_VAL_DEAD
;
1986 /* clobber call registers */
1987 for(reg
= 0; reg
< TCG_TARGET_NB_REGS
; reg
++) {
1988 if (tcg_regset_test_reg(tcg_target_call_clobber_regs
, reg
)) {
1989 tcg_reg_free(s
, reg
);
1993 /* store globals and free associated registers (we assume the call
1994 can modify any global. */
1995 if (!(flags
& TCG_CALL_CONST
)) {
1996 save_globals(s
, allocated_regs
);
1999 tcg_out_op(s
, opc
, &func_arg
, &const_func_arg
);
2001 /* assign output registers and emit moves if needed */
2002 for(i
= 0; i
< nb_oargs
; i
++) {
2004 ts
= &s
->temps
[arg
];
2005 reg
= tcg_target_call_oarg_regs
[i
];
2006 assert(s
->reg_to_temp
[reg
] == -1);
2007 if (ts
->fixed_reg
) {
2008 if (ts
->reg
!= reg
) {
2009 tcg_out_mov(s
, ts
->type
, ts
->reg
, reg
);
2012 if (ts
->val_type
== TEMP_VAL_REG
)
2013 s
->reg_to_temp
[ts
->reg
] = -1;
2014 if (IS_DEAD_ARG(i
)) {
2015 ts
->val_type
= TEMP_VAL_DEAD
;
2017 ts
->val_type
= TEMP_VAL_REG
;
2019 ts
->mem_coherent
= 0;
2020 s
->reg_to_temp
[reg
] = arg
;
2025 return nb_iargs
+ nb_oargs
+ def
->nb_cargs
+ 1;
2028 #ifdef CONFIG_PROFILER
2030 static int64_t tcg_table_op_count
[NB_OPS
];
2032 static void dump_op_count(void)
2036 f
= fopen("/tmp/op.log", "w");
2037 for(i
= INDEX_op_end
; i
< NB_OPS
; i
++) {
2038 fprintf(f
, "%s %" PRId64
"\n", tcg_op_defs
[i
].name
, tcg_table_op_count
[i
]);
2045 static inline int tcg_gen_code_common(TCGContext
*s
, uint8_t *gen_code_buf
,
2050 const TCGOpDef
*def
;
2051 unsigned int dead_args
;
2055 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
2062 #ifdef USE_TCG_OPTIMIZATIONS
2064 tcg_optimize(s
, gen_opc_ptr
, gen_opparam_buf
, tcg_op_defs
);
2067 #ifdef CONFIG_PROFILER
2068 s
->la_time
-= profile_getclock();
2070 tcg_liveness_analysis(s
);
2071 #ifdef CONFIG_PROFILER
2072 s
->la_time
+= profile_getclock();
2076 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT
))) {
2077 qemu_log("OP after liveness analysis:\n");
2083 tcg_reg_alloc_start(s
);
2085 s
->code_buf
= gen_code_buf
;
2086 s
->code_ptr
= gen_code_buf
;
2088 args
= gen_opparam_buf
;
2092 opc
= gen_opc_buf
[op_index
];
2093 #ifdef CONFIG_PROFILER
2094 tcg_table_op_count
[opc
]++;
2096 def
= &tcg_op_defs
[opc
];
2098 printf("%s: %d %d %d\n", def
->name
,
2099 def
->nb_oargs
, def
->nb_iargs
, def
->nb_cargs
);
2103 case INDEX_op_mov_i32
:
2104 #if TCG_TARGET_REG_BITS == 64
2105 case INDEX_op_mov_i64
:
2107 dead_args
= s
->op_dead_args
[op_index
];
2108 tcg_reg_alloc_mov(s
, def
, args
, dead_args
);
2110 case INDEX_op_movi_i32
:
2111 #if TCG_TARGET_REG_BITS == 64
2112 case INDEX_op_movi_i64
:
2114 tcg_reg_alloc_movi(s
, args
);
2116 case INDEX_op_debug_insn_start
:
2117 /* debug instruction */
2127 case INDEX_op_discard
:
2130 ts
= &s
->temps
[args
[0]];
2131 /* mark the temporary as dead */
2132 if (!ts
->fixed_reg
) {
2133 if (ts
->val_type
== TEMP_VAL_REG
)
2134 s
->reg_to_temp
[ts
->reg
] = -1;
2135 ts
->val_type
= TEMP_VAL_DEAD
;
2139 case INDEX_op_set_label
:
2140 tcg_reg_alloc_bb_end(s
, s
->reserved_regs
);
2141 tcg_out_label(s
, args
[0], s
->code_ptr
);
2144 dead_args
= s
->op_dead_args
[op_index
];
2145 args
+= tcg_reg_alloc_call(s
, def
, opc
, args
, dead_args
);
2150 /* Sanity check that we've not introduced any unhandled opcodes. */
2151 if (def
->flags
& TCG_OPF_NOT_PRESENT
) {
2154 /* Note: in order to speed up the code, it would be much
2155 faster to have specialized register allocator functions for
2156 some common argument patterns */
2157 dead_args
= s
->op_dead_args
[op_index
];
2158 tcg_reg_alloc_op(s
, def
, opc
, args
, dead_args
);
2161 args
+= def
->nb_args
;
2163 if (search_pc
>= 0 && search_pc
< s
->code_ptr
- gen_code_buf
) {
2175 int tcg_gen_code(TCGContext
*s
, uint8_t *gen_code_buf
)
2177 #ifdef CONFIG_PROFILER
2180 n
= (gen_opc_ptr
- gen_opc_buf
);
2182 if (n
> s
->op_count_max
)
2183 s
->op_count_max
= n
;
2185 s
->temp_count
+= s
->nb_temps
;
2186 if (s
->nb_temps
> s
->temp_count_max
)
2187 s
->temp_count_max
= s
->nb_temps
;
2191 tcg_gen_code_common(s
, gen_code_buf
, -1);
2193 /* flush instruction cache */
2194 flush_icache_range((tcg_target_ulong
)gen_code_buf
,
2195 (tcg_target_ulong
)s
->code_ptr
);
2197 return s
->code_ptr
- gen_code_buf
;
2200 /* Return the index of the micro operation such as the pc after is <
2201 offset bytes from the start of the TB. The contents of gen_code_buf must
2202 not be changed, though writing the same values is ok.
2203 Return -1 if not found. */
2204 int tcg_gen_code_search_pc(TCGContext
*s
, uint8_t *gen_code_buf
, long offset
)
2206 return tcg_gen_code_common(s
, gen_code_buf
, offset
);
2209 #ifdef CONFIG_PROFILER
2210 void tcg_dump_info(FILE *f
, fprintf_function cpu_fprintf
)
2212 TCGContext
*s
= &tcg_ctx
;
2215 tot
= s
->interm_time
+ s
->code_time
;
2216 cpu_fprintf(f
, "JIT cycles %" PRId64
" (%0.3f s at 2.4 GHz)\n",
2218 cpu_fprintf(f
, "translated TBs %" PRId64
" (aborted=%" PRId64
" %0.1f%%)\n",
2220 s
->tb_count1
- s
->tb_count
,
2221 s
->tb_count1
? (double)(s
->tb_count1
- s
->tb_count
) / s
->tb_count1
* 100.0 : 0);
2222 cpu_fprintf(f
, "avg ops/TB %0.1f max=%d\n",
2223 s
->tb_count
? (double)s
->op_count
/ s
->tb_count
: 0, s
->op_count_max
);
2224 cpu_fprintf(f
, "deleted ops/TB %0.2f\n",
2226 (double)s
->del_op_count
/ s
->tb_count
: 0);
2227 cpu_fprintf(f
, "avg temps/TB %0.2f max=%d\n",
2229 (double)s
->temp_count
/ s
->tb_count
: 0,
2232 cpu_fprintf(f
, "cycles/op %0.1f\n",
2233 s
->op_count
? (double)tot
/ s
->op_count
: 0);
2234 cpu_fprintf(f
, "cycles/in byte %0.1f\n",
2235 s
->code_in_len
? (double)tot
/ s
->code_in_len
: 0);
2236 cpu_fprintf(f
, "cycles/out byte %0.1f\n",
2237 s
->code_out_len
? (double)tot
/ s
->code_out_len
: 0);
2240 cpu_fprintf(f
, " gen_interm time %0.1f%%\n",
2241 (double)s
->interm_time
/ tot
* 100.0);
2242 cpu_fprintf(f
, " gen_code time %0.1f%%\n",
2243 (double)s
->code_time
/ tot
* 100.0);
2244 cpu_fprintf(f
, "liveness/code time %0.1f%%\n",
2245 (double)s
->la_time
/ (s
->code_time
? s
->code_time
: 1) * 100.0);
2246 cpu_fprintf(f
, "cpu_restore count %" PRId64
"\n",
2248 cpu_fprintf(f
, " avg cycles %0.1f\n",
2249 s
->restore_count
? (double)s
->restore_time
/ s
->restore_count
: 0);
2254 void tcg_dump_info(FILE *f
, fprintf_function cpu_fprintf
)
2256 cpu_fprintf(f
, "[TCG profiler not compiled]\n");
2260 #ifdef ELF_HOST_MACHINE
2261 /* In order to use this feature, the backend needs to do three things:
2263 (1) Define ELF_HOST_MACHINE to indicate both what value to
2264 put into the ELF image and to indicate support for the feature.
2266 (2) Define tcg_register_jit. This should create a buffer containing
2267 the contents of a .debug_frame section that describes the post-
2268 prologue unwind info for the tcg machine.
2270 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2273 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
/* One in-memory symbol file registered with GDB.  Layout is dictated by
   the GDB JIT interface and must not be changed. */
struct jit_code_entry {
    struct jit_code_entry *next_entry;   /* doubly-linked list of entries */
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;            /* start of the in-memory ELF image */
    uint64_t symfile_size;               /* its size in bytes */
};
/* Root descriptor that GDB inspects.  Layout is dictated by the GDB JIT
   interface and must not be changed.
   NOTE(review): the first field (uint32_t version) was lost in extraction
   and reconstructed per the GDB JIT interface — verify. */
struct jit_descriptor {
    uint32_t version;                        /* interface version (1) */
    uint32_t action_flag;                    /* jit_actions_t value */
    struct jit_code_entry *relevant_entry;   /* entry being (un)registered */
    struct jit_code_entry *first_entry;      /* head of the entry list */
};
2294 void __jit_debug_register_code(void) __attribute__((noinline
));
2295 void __jit_debug_register_code(void)
2300 /* Must statically initialize the version, because GDB may check
2301 the version before we can set it. */
2302 struct jit_descriptor __jit_debug_descriptor
= { 1, 0, 0, 0 };
2304 /* End GDB interface. */
/* Return the offset of 'str' within the NUL-separated string table
   'strtab' (which starts with an empty string at offset 0).  The caller
   guarantees the string is present; otherwise this does not terminate. */
static int find_string(const char *strtab, const char *str)
{
    const char *p;

    for (p = strtab + 1; strcmp(p, str) != 0; p += strlen(p) + 1) {
        continue;
    }
    return p - strtab;
}
2318 static void tcg_register_jit_int(void *buf_ptr
, size_t buf_size
,
2319 void *debug_frame
, size_t debug_frame_size
)
2321 struct __attribute__((packed
)) DebugInfo
{
2328 uintptr_t cu_low_pc
;
2329 uintptr_t cu_high_pc
;
2332 uintptr_t fn_low_pc
;
2333 uintptr_t fn_high_pc
;
2342 struct DebugInfo di
;
2347 struct ElfImage
*img
;
2349 static const struct ElfImage img_template
= {
2351 .e_ident
[EI_MAG0
] = ELFMAG0
,
2352 .e_ident
[EI_MAG1
] = ELFMAG1
,
2353 .e_ident
[EI_MAG2
] = ELFMAG2
,
2354 .e_ident
[EI_MAG3
] = ELFMAG3
,
2355 .e_ident
[EI_CLASS
] = ELF_CLASS
,
2356 .e_ident
[EI_DATA
] = ELF_DATA
,
2357 .e_ident
[EI_VERSION
] = EV_CURRENT
,
2359 .e_machine
= ELF_HOST_MACHINE
,
2360 .e_version
= EV_CURRENT
,
2361 .e_phoff
= offsetof(struct ElfImage
, phdr
),
2362 .e_shoff
= offsetof(struct ElfImage
, shdr
),
2363 .e_ehsize
= sizeof(ElfW(Shdr
)),
2364 .e_phentsize
= sizeof(ElfW(Phdr
)),
2366 .e_shentsize
= sizeof(ElfW(Shdr
)),
2367 .e_shnum
= ARRAY_SIZE(img
->shdr
),
2368 .e_shstrndx
= ARRAY_SIZE(img
->shdr
) - 1,
2369 #ifdef ELF_HOST_FLAGS
2370 .e_flags
= ELF_HOST_FLAGS
,
2373 .e_ident
[EI_OSABI
] = ELF_OSABI
,
2381 [0] = { .sh_type
= SHT_NULL
},
2382 /* Trick: The contents of code_gen_buffer are not present in
2383 this fake ELF file; that got allocated elsewhere. Therefore
2384 we mark .text as SHT_NOBITS (similar to .bss) so that readers
2385 will not look for contents. We can record any address. */
2387 .sh_type
= SHT_NOBITS
,
2388 .sh_flags
= SHF_EXECINSTR
| SHF_ALLOC
,
2390 [2] = { /* .debug_info */
2391 .sh_type
= SHT_PROGBITS
,
2392 .sh_offset
= offsetof(struct ElfImage
, di
),
2393 .sh_size
= sizeof(struct DebugInfo
),
2395 [3] = { /* .debug_abbrev */
2396 .sh_type
= SHT_PROGBITS
,
2397 .sh_offset
= offsetof(struct ElfImage
, da
),
2398 .sh_size
= sizeof(img
->da
),
2400 [4] = { /* .debug_frame */
2401 .sh_type
= SHT_PROGBITS
,
2402 .sh_offset
= sizeof(struct ElfImage
),
2404 [5] = { /* .symtab */
2405 .sh_type
= SHT_SYMTAB
,
2406 .sh_offset
= offsetof(struct ElfImage
, sym
),
2407 .sh_size
= sizeof(img
->sym
),
2409 .sh_link
= ARRAY_SIZE(img
->shdr
) - 1,
2410 .sh_entsize
= sizeof(ElfW(Sym
)),
2412 [6] = { /* .strtab */
2413 .sh_type
= SHT_STRTAB
,
2414 .sh_offset
= offsetof(struct ElfImage
, str
),
2415 .sh_size
= sizeof(img
->str
),
2419 [1] = { /* code_gen_buffer */
2420 .st_info
= ELF_ST_INFO(STB_GLOBAL
, STT_FUNC
),
2425 .len
= sizeof(struct DebugInfo
) - 4,
2427 .ptr_size
= sizeof(void *),
2429 .cu_lang
= 0x8001, /* DW_LANG_Mips_Assembler */
2431 .fn_name
= "code_gen_buffer"
2434 1, /* abbrev number (the cu) */
2435 0x11, 1, /* DW_TAG_compile_unit, has children */
2436 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
2437 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2438 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2439 0, 0, /* end of abbrev */
2440 2, /* abbrev number (the fn) */
2441 0x2e, 0, /* DW_TAG_subprogram, no children */
2442 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
2443 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2444 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2445 0, 0, /* end of abbrev */
2446 0 /* no more abbrev */
2448 .str
= "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2449 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2452 /* We only need a single jit entry; statically allocate it. */
2453 static struct jit_code_entry one_entry
;
2455 uintptr_t buf
= (uintptr_t)buf_ptr
;
2456 size_t img_size
= sizeof(struct ElfImage
) + debug_frame_size
;
2458 img
= g_malloc(img_size
);
2459 *img
= img_template
;
2460 memcpy(img
+ 1, debug_frame
, debug_frame_size
);
2462 img
->phdr
.p_vaddr
= buf
;
2463 img
->phdr
.p_paddr
= buf
;
2464 img
->phdr
.p_memsz
= buf_size
;
2466 img
->shdr
[1].sh_name
= find_string(img
->str
, ".text");
2467 img
->shdr
[1].sh_addr
= buf
;
2468 img
->shdr
[1].sh_size
= buf_size
;
2470 img
->shdr
[2].sh_name
= find_string(img
->str
, ".debug_info");
2471 img
->shdr
[3].sh_name
= find_string(img
->str
, ".debug_abbrev");
2473 img
->shdr
[4].sh_name
= find_string(img
->str
, ".debug_frame");
2474 img
->shdr
[4].sh_size
= debug_frame_size
;
2476 img
->shdr
[5].sh_name
= find_string(img
->str
, ".symtab");
2477 img
->shdr
[6].sh_name
= find_string(img
->str
, ".strtab");
2479 img
->sym
[1].st_name
= find_string(img
->str
, "code_gen_buffer");
2480 img
->sym
[1].st_value
= buf
;
2481 img
->sym
[1].st_size
= buf_size
;
2483 img
->di
.cu_low_pc
= buf
;
2484 img
->di
.cu_high_pc
= buf_size
;
2485 img
->di
.fn_low_pc
= buf
;
2486 img
->di
.fn_high_pc
= buf_size
;
2489 /* Enable this block to be able to debug the ELF image file creation.
2490 One can use readelf, objdump, or other inspection utilities. */
2492 FILE *f
= fopen("/tmp/qemu.jit", "w+b");
2494 if (fwrite(img
, img_size
, 1, f
) != img_size
) {
2495 /* Avoid stupid unused return value warning for fwrite. */
2502 one_entry
.symfile_addr
= img
;
2503 one_entry
.symfile_size
= img_size
;
2505 __jit_debug_descriptor
.action_flag
= JIT_REGISTER_FN
;
2506 __jit_debug_descriptor
.relevant_entry
= &one_entry
;
2507 __jit_debug_descriptor
.first_entry
= &one_entry
;
2508 __jit_debug_register_code();
2511 /* No support for the feature. Provide the entry point expected by exec.c,
2512 and implement the internal function we declared earlier. */
/* No ELF_HOST_MACHINE support: the GDB JIT registration is a no-op.  */
static void tcg_register_jit_int(void *buf, size_t size,
                                 void *debug_frame, size_t debug_frame_size)
{
}
/* Entry point expected by exec.c; no-op without ELF_HOST_MACHINE.  */
void tcg_register_jit(void *buf, size_t buf_size)
{
}
2522 #endif /* ELF_HOST_MACHINE */