/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef TCG_H
#define TCG_H

#include "qemu-common.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "tcg-target.h"
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
#define CPU_TEMP_BUF_NLONGS 128

/* Default target word size to pointer size.  */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif
#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif
#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_deposit_i64      0
#define TCG_TARGET_HAS_movcond_i64      0
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
#define TCG_TARGET_HAS_muls2_i64        0
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
#endif
#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
#define TCG_TARGET_HAS_rem_i32          0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#endif
/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif
#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;
#define tcg_regset_clear(d) (d) = 0
#define tcg_regset_set(d, s) (d) = (s)
#define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg)
#define tcg_regset_set_reg(d, r) (d) |= 1L << (r)
#define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r))
#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1)
#define tcg_regset_or(d, a, b) (d) = (a) | (b)
#define tcg_regset_and(d, a, b) (d) = (a) & (b)
#define tcg_regset_andnot(d, a, b) (d) = (a) & ~(b)
#define tcg_regset_not(d, a) (d) = ~(a)
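/* Illustrative sketch only (not part of the TCG API; the function name is
 * made up for this example): how the regset macros above compose to build
 * and query a register set from raw register indexes.  */
static inline bool tcg_regset_example(void)
{
    TCGRegSet set;

    tcg_regset_clear(set);          /* set = 0 */
    tcg_regset_set_reg(set, 1);     /* set |= 1 << 1 */
    tcg_regset_set_reg(set, 3);     /* set |= 1 << 3 */
    tcg_regset_reset_reg(set, 3);   /* clear register 3 again */
    return tcg_regset_test_reg(set, 1) && !tcg_regset_test_reg(set, 3);
}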
#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif
#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#elif QEMU_GNUC_PREREQ(4, 5)
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#else
# define tcg_debug_assert(X) do { (void)(X); } while (0)
#endif
typedef struct TCGRelocation {
    struct TCGRelocation *next;
    int type;
    tcg_insn_unit *ptr;
    intptr_t addend;
} TCGRelocation;
typedef struct TCGLabel {
    unsigned has_value : 1;
    unsigned id : 31;
    union {
        uintptr_t value;
        tcg_insn_unit *value_ptr;
        TCGRelocation *first_reloc;
    } u;
} TCGLabel;
typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[0] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512
/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128
typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,
    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;
/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
typedef enum TCGMemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,   /* Mask for the above.  */

    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */

    MO_BSWAP = 8,   /* Host reverse endian.  */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif
    /* MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) need the address to be aligned
     * to a size larger than the size of the memory access itself.
     * QEMU's existing (cost-free) alignment check is sufficient to support
     * this, but the alignment size must be specifiable.  MO_ALIGN requests
     * a natural alignment (i.e. the alignment size equals the access size).
     * Note that the alignment size must be equal to or greater than the
     * access size.
     *
     * There are three options:
     * - alignment to the size of the access (MO_ALIGN);
     * - alignment to a specified size that is equal to or greater than
     *   the access size (MO_ALIGN_x, where 'x' is a size in bytes);
     * - unaligned access permitted (MO_UNALN).
     */
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
#ifdef ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,

    /* Combinations of the above, for ease of use.  */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_Q     = MO_64,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LEQ   = MO_LE | MO_Q,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BEQ   = MO_BE | MO_Q,

    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TEQ   = MO_TE | MO_Q,

    MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;
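/* Illustrative only (checks added for documentation, not part of the
 * original interface): a couple of compile-time identities showing how the
 * combinations above decompose.  MO_TESW, for instance, is a 16-bit,
 * sign-extended, target-endian access; MO_TEUL is a 32-bit zero-extending
 * one.  */
QEMU_BUILD_BUG_ON((MO_TESW & MO_SIZE) != MO_16);
QEMU_BUILD_BUG_ON((MO_TESW & MO_SIGN) == 0);
QEMU_BUILD_BUG_ON((MO_TEUL & MO_SSIZE) != MO_32);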
/**
 * get_alignment_bits
 * @memop: TCGMemOp value
 *
 * Extract the alignment size from the memop.
 *
 * Returns: 0 in case of byte access (which is always aligned);
 *          positive value - number of alignment bits;
 *          negative value if unaligned access enabled
 *          and this is not a byte access.
 */
static inline int get_alignment_bits(TCGMemOp memop)
{
    int a = memop & MO_AMASK;
    int s = memop & MO_SIZE;
    int r;

    if (a == MO_UNALN) {
        /* Negative value if unaligned access enabled,
         * or zero value in case of byte access.
         */
        r = -s;
    } else if (a == MO_ALIGN) {
        /* A natural alignment: return a number of access size bits */
        r = s;
    } else {
        /* Specific alignment size.  It must be equal to or greater
         * than the access size.
         */
        r = a >> MO_ASHIFT;
        tcg_debug_assert(r >= s);
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << r) - 1)) == 0);
#endif
    return r;
}
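/* Worked example (illustrative only; the function name is invented for this
 * sketch): a 32-bit access with natural alignment yields 2 alignment bits,
 * an explicit 8-byte alignment yields 3, and a byte access always yields 0.  */
static inline void tcg_example_alignment_bits(void)
{
    tcg_debug_assert(get_alignment_bits(MO_32 | MO_ALIGN) == 2);
    tcg_debug_assert(get_alignment_bits(MO_32 | MO_ALIGN_8) == 3);
    tcg_debug_assert(get_alignment_bits(MO_8) == 0);
}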
typedef tcg_target_ulong TCGArg;
/* Define a type and accessor macros for variables.  Using pointer types
   is nice because it gives some level of type safety.  Converting to and
   from intptr_t rather than int reduces the number of sign-extension
   instructions that get implied on 64-bit hosts.  Users of tcg_gen_* don't
   need to know about any of this, and should treat TCGv as an opaque type.
   In addition we do typechecking for different types of variables.  TCGv_i32
   and TCGv_i64 are 32/64-bit variables respectively.  TCGv and TCGv_ptr
   are aliases for target_ulong and host pointer sized values respectively.  */
typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif
static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i)
{
    return (TCGv_i32)i;
}

static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i)
{
    return (TCGv_i64)i;
}

static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i)
{
    return (TCGv_ptr)i;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t)
{
    return (intptr_t)t;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t)
{
    return (intptr_t)t;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
{
    return (intptr_t)t;
}
#if TCG_TARGET_REG_BITS == 32
#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t))
#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1)
#endif
#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b))

/* Dummy definition to avoid compiler warnings.  */
#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1)
#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1)
#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1)

#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1)
#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1)
#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1)
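/* Illustrative sketch (the helper name is made up for this example): the
 * "unused" markers above give a variable a recognisable sentinel value so
 * that code can later test whether a real temporary was ever assigned.  */
static inline bool tcg_example_unused_marker(void)
{
    TCGv_i32 t;

    TCGV_UNUSED_I32(t);               /* t now holds the -1 sentinel */
    return TCGV_IS_UNUSED_I32(t);     /* true until t is given a real temp */
}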
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS    0x0010
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0020
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0040

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/* used to align parameters */
#define TCG_CALL_DUMMY_TCGV     MAKE_TCGV_I32(-1)
#define TCG_CALL_DUMMY_ARG      ((TCGArg)(-1))
/* Conditions.  Note that these are laid out for easy manipulation by
   the functions below:
     bit 0 is used for inverting;
     bit 1 is signed,
     bit 2 is unsigned,
     bit 3 is used with bit 0 for swapping signed/unsigned.  */
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;
/* Invert the sense of the comparison.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison.  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}
/* Must a comparison be considered unsigned?  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}
529 /* Create a "high" version of a double-word comparison.
530 This removes equality from a LTE or GTE comparison. */
531 static inline TCGCond
tcg_high_cond(TCGCond c
)
538 return (TCGCond
)(c
^ 8);
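/* Worked example (illustrative only; not part of the API): a few identities
 * that follow from the bit layout documented above.  */
static inline void tcg_example_cond_identities(void)
{
    tcg_debug_assert(tcg_invert_cond(TCG_COND_LT) == TCG_COND_GE);
    tcg_debug_assert(tcg_swap_cond(TCG_COND_LT) == TCG_COND_GT);
    tcg_debug_assert(tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU);
    tcg_debug_assert(tcg_high_cond(TCG_COND_LE) == TCG_COND_LT);
    tcg_debug_assert(is_unsigned_cond(TCG_COND_LTU));
}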
typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;
typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    unsigned int fixed_reg:1;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    unsigned int temp_local:1; /* If true, the temp is saved across
                                  basic blocks. Otherwise, it is not
                                  preserved across basic blocks. */
    unsigned int temp_allocated:1; /* never used for code gen */

    tcg_target_long val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;
} TCGTemp;
typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;
typedef struct TCGOp {
    TCGOpcode opc   : 8;

    /* The number of out and in parameters for a call.  */
    unsigned callo  : 2;
    unsigned calli  : 6;

    /* Index of the arguments for this op, or -1 for zero-operand ops.  */
    signed args     : 16;

    /* Index of the prev/next op, or -1 for the end of the list.  */
    signed prev     : 16;
    signed next     : 16;
} TCGOp;

QEMU_BUILD_BUG_ON(NB_OPS > 0xff);
QEMU_BUILD_BUG_ON(OPC_BUF_SIZE >= 0x7fff);
QEMU_BUILD_BUG_ON(OPPARAM_BUF_SIZE >= 0x7fff);
struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */

    /* liveness analysis */
    uint16_t *op_dead_args; /* for each operation, each bit tells if the
                               corresponding argument is dead */
    uint8_t *op_sync_args;  /* for each operation, each bit tells if the
                               corresponding output argument needs to be
                               synced to memory. */

    TCGRegSet reserved_regs;
    intptr_t current_frame_offset;
    intptr_t frame_start;

    tcg_insn_unit *code_ptr;
#ifdef CONFIG_PROFILER
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int64_t del_op_count;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t restore_count;
    int64_t restore_time;
#endif
#ifdef CONFIG_DEBUG_TCG
    int goto_tb_issue_mask;
#endif

    int gen_first_op_idx;
    int gen_next_op_idx;
    int gen_next_parm_idx;

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    int code_gen_max_blocks;
    void *code_gen_prologue;
    void *code_gen_buffer;
    size_t code_gen_buffer_size;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */
    TCGv_env tcg_env;                   /* *_exec  */

    /* The TCGBackendData structure is private to tcg-target.inc.c.  */
    struct TCGBackendData *be;

    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    TCGOp gen_op_buf[OPC_BUF_SIZE];
    TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};

extern TCGContext tcg_ctx;
static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
{
    int op_argi = tcg_ctx.gen_op_buf[op_idx].args;
    tcg_ctx.gen_opparam_buf[op_argi + arg] = v;
}
/* The number of opcodes emitted so far.  */
static inline int tcg_op_buf_count(void)
{
    return tcg_ctx.gen_next_op_idx;
}
/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    return tcg_op_buf_count() >= OPC_MAX_SIZE;
}
/* pool based memory allocation */

void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
void tcg_pool_delete(TCGContext *s);

void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);
static inline void *tcg_malloc(int size)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *ptr, *ptr_end;
    size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(&tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}
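/* Worked example (illustrative, assuming sizeof(long) == 8 on the host): a
 * request for 13 bytes is rounded up to 16 by the mask above, so consecutive
 * pool allocations stay long-aligned within the current chunk.  */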
void tcg_context_init(TCGContext *s);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
int tcg_global_mem_new_internal(TCGType, TCGv_ptr, intptr_t, const char *);

TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name);
TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name);

TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);

void tcg_temp_free_i32(TCGv_i32 arg);
void tcg_temp_free_i64(TCGv_i64 arg);
static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return MAKE_TCGV_I32(idx);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    return tcg_temp_new_internal_i32(0);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    return tcg_temp_new_internal_i32(1);
}
static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return MAKE_TCGV_I64(idx);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    return tcg_temp_new_internal_i64(0);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    return tcg_temp_new_internal_i64(1);
}
#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif
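/* Illustrative sketch of the intended usage pattern (the function name is
 * made up for this example; a real translator would emit TCG ops between
 * allocation and free).  */
static inline void tcg_example_leak_check(void)
{
    TCGv_i32 t;

    tcg_clear_temp_count();
    t = tcg_temp_new_i32();
    /* ... generate code that uses t ... */
    tcg_temp_free_i32(t);
    tcg_debug_assert(!tcg_check_temp_count());
}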
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);
#define TCG_CT_ALIAS  0x80
#define TCG_CT_IALIAS 0x40
#define TCG_CT_REG    0x01
#define TCG_CT_CONST  0x02 /* any constant of register size */

typedef struct TCGArgConstraint {
    uint16_t ct;
    uint8_t alias_index;
    union {
        TCGRegSet regs;
    } u;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16
/* Bits for TCGOpDef->flags, 8 bits available.  */
enum {
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x01,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x02,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x04,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x08,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x10,
};
typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
    int *sorted_args;
#if defined(CONFIG_DEBUG_TCG)
    int used;
#endif
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;
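/* Illustrative only (made-up helper name): the table above is indexed by
 * TCGOpcode, e.g. tcg_op_defs[INDEX_op_add_i32].nb_iargs gives the number
 * of input arguments of the 32-bit add op.  */
static inline const TCGOpDef *tcg_example_op_def(TCGOpcode op)
{
    return &tcg_op_defs[op];
}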
typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;
#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)
void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
#if UINTPTR_MAX == UINT32_MAX
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n))

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
    TCGV_NAT_TO_PTR(tcg_global_reg_new_i32((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
#else
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n))

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
    TCGV_NAT_TO_PTR(tcg_global_reg_new_i64((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
#endif
void tcg_gen_callN(TCGContext *s, void *func,
                   TCGArg ret, int nargs, TCGArg *args);

void tcg_op_remove(TCGContext *s, TCGOp *op);
void tcg_optimize(TCGContext *s);

/* only used for debugging purposes */
void tcg_dump_ops(TCGContext *s);
void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf);
TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);

TCGLabel *gen_new_label(void);
/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}
/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}
/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return a - b;
}
/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
{
    return tcg_ptr_byte_diff(target, s->code_ptr);
}
/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}
/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;
/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}
/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}
/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}
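/* Worked example (illustrative; the function name is invented): encoding a
 * target-endian 32-bit load for mmu index 1 and getting the parts back.  */
static inline void tcg_example_memop_idx(void)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, 1);

    tcg_debug_assert(get_memop(oi) == MO_TEUL);
    tcg_debug_assert(get_mmuidx(oi) == 1);
}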
/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK 3
#define TB_EXIT_IDX0 0
#define TB_EXIT_IDX1 1
#define TB_EXIT_ICOUNT_EXPIRED 2
#define TB_EXIT_REQUESTED 3
#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr)
#endif
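/* Illustrative sketch (names invented for the example): how a caller can
 * split the value documented above into the TB pointer and the exit code.  */
static inline int tcg_example_decode_tb_exit(uintptr_t ret)
{
    void *last_tb = (void *)(ret & ~(uintptr_t)TB_EXIT_MASK);
    int exit_code = ret & TB_EXIT_MASK;   /* TB_EXIT_IDX0 ... TB_EXIT_REQUESTED */

    (void)last_tb;  /* a real main loop would use this for chaining/sync */
    return exit_code;
}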
void tcg_register_jit(void *buf, size_t buf_size);
/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
#endif
#endif /* CONFIG_SOFTMMU */

#endif /* TCG_H */