/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef TCG_H
#define TCG_H

#include "qemu-common.h"
#include "cpu.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "tcg-mo.h"
#include "tcg-target.h"
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
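/* Worked example: with the defaults above, a 64-bit host gets
 * MAX_OPC_PARAM = 4 + 1 * (5 + 1) = 10, while a 32-bit host, where each
 * 64-bit argument occupies two parameter slots, gets 4 + 2 * 6 = 16.  */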
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define CPU_TEMP_BUF_NLONGS 128
/* Default target word size to pointer size.  */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif
#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif
/* Oversized TCG guests make things like MTTCG hard
 * as we can't use atomics for cputlb updates.
 */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#define TCG_OVERSIZED_GUEST 1
#else
#define TCG_OVERSIZED_GUEST 0
#endif
#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_clz_i64          0
#define TCG_TARGET_HAS_ctz_i64          0
#define TCG_TARGET_HAS_ctpop_i64        0
#define TCG_TARGET_HAS_deposit_i64      0
#define TCG_TARGET_HAS_extract_i64      0
#define TCG_TARGET_HAS_sextract_i64     0
#define TCG_TARGET_HAS_movcond_i64      0
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
#define TCG_TARGET_HAS_muls2_i64        0
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
#endif
#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i32_valid
#define TCG_TARGET_extract_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i64_valid
#define TCG_TARGET_extract_i64_valid(ofs, len) 1
#endif
/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
#define TCG_TARGET_HAS_rem_i32          0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#endif
/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif
#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;
#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)
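/* Usage sketch: a backend typically reserves the registers the allocator
 * must never hand out, e.g. its stack pointer (TCG_REG_CALL_STACK is
 * whatever name the target defines for it):
 *
 *     tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
 *     assert(tcg_regset_test_reg(s->reserved_regs, TCG_REG_CALL_STACK));
 */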
#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif
#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#elif QEMU_GNUC_PREREQ(4, 5)
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#else
# define tcg_debug_assert(X) do { (void)(X); } while (0)
#endif
typedef struct TCGRelocation {
    struct TCGRelocation *next;
    int type;
    tcg_insn_unit *ptr;
    intptr_t addend;
} TCGRelocation;
typedef struct TCGLabel {
    unsigned has_value : 1;
    unsigned id : 31;
    union {
        uintptr_t value;
        tcg_insn_unit *value_ptr;
        TCGRelocation *first_reloc;
    } u;
} TCGLabel;
typedef struct TCGPool {
    struct TCGPool *next;
    intptr_t size;
    uint8_t data[0] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128
typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,
    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;
/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
typedef enum TCGMemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,   /* Mask for the above.  */

    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */

    MO_BSWAP = 8,   /* Host reverse endian.  */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif

    /* MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) need the address which is aligned
     * to a size more than the size of the memory access.
     * Some architectures (e.g. SPARCv9) need an address which is aligned,
     * but less strictly than the natural alignment.
     *
     * MO_ALIGN supposes the alignment size is the size of a memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN).
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
     */
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
#ifdef ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,

    /* Combinations of the above, for ease of use.  */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_Q     = MO_64,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LEQ   = MO_LE | MO_Q,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BEQ   = MO_BE | MO_Q,

    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TEQ   = MO_TE | MO_Q,

    MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;
/**
 * get_alignment_bits
 * @memop: TCGMemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned get_alignment_bits(TCGMemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
#endif
    return a;
}
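/* Worked examples: get_alignment_bits(MO_UNALN | MO_32) is 0;
 * get_alignment_bits(MO_ALIGN | MO_32) is 2, the natural 4-byte alignment;
 * get_alignment_bits(MO_ALIGN_16 | MO_8) is 4, a 16-byte requirement
 * larger than the access itself.  The result is always log2 of the
 * required alignment in bytes.  */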
typedef tcg_target_ulong TCGArg;
/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README. Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32 : 32 bit integer type
    * TCGv_i64 : 64 bit integer type
    * TCGv_ptr : a host pointer type
    * TCGv : an integer type the same size as target_ulong
             (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures. They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on. Only the internals of
   TCG need to care about the actual contents of the types.  */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif
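/* A front-end usage sketch (tcg_temp_new() is the TARGET_LONG_BITS-sized
 * allocator provided by tcg-op.h; the variable names are hypothetical):
 *
 *     TCGv_i64 val64 = tcg_temp_new_i64();   // always 64 bit
 *     TCGv     addr  = tcg_temp_new();       // target_ulong sized
 *
 * Passing 'addr' where a TCGv_i64 is expected on a 32-bit target is
 * rejected at compile time, which is the point of the opaque types.  */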
/* call flags */
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS    0x0010
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0020
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0040

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
#define TCG_CALL_DUMMY_ARG      ((TCGArg)0)
/* Conditions.  Note that these are laid out for easy manipulation by
   the functions below:
     bit 0 is used for inverting;
     bit 1 is signed,
     bit 2 is unsigned,
     bit 3 is used with bit 0 for swapping signed/unsigned.  */
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;
/* Invert the sense of the comparison.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison.  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}

/* Must a comparison be considered unsigned?  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}

/* Create a "high" version of a double-word comparison.
   This removes equality from a LTE or GTE comparison.  */
static inline TCGCond tcg_high_cond(TCGCond c)
{
    switch (c) {
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
        return (TCGCond)(c ^ 8);
    default:
        return c;
    }
}
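/* Worked examples of the bit layout above:
 *   tcg_invert_cond(TCG_COND_LT)   == TCG_COND_GE   (bit 0 flipped)
 *   tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT   (a < b  <=>  b > a)
 *   tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU
 *   tcg_high_cond(TCG_COND_LE)     == TCG_COND_LT   (equality is decided
 *                                                    by the low-word compare)
 */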
typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    unsigned int fixed_reg:1;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    /* If true, the temp is saved across both basic blocks and
       translation blocks.  */
    unsigned int temp_global:1;
    /* If true, the temp is saved across basic blocks but dead
       at the end of translation blocks.  If false, the temp is
       dead at the end of basic blocks.  */
    unsigned int temp_local:1;
    unsigned int temp_allocated:1;

    tcg_target_long val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately.  */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;
typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG  4
#define SYNC_ARG  1
typedef uint16_t TCGLifeData;
/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* The number of out and in parameter for a call.  */
    unsigned calli  : 4;        /* 12 */
    unsigned callo  : 2;        /* 14 */
    unsigned        : 2;        /* 16 */

    /* Index of the prev/next op, or 0 for the end of the list.  */
    unsigned prev   : 16;       /* 32 */
    unsigned next   : 16;       /* 48 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 64 */

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];
} TCGOp;

/* Make sure that we don't expand the structure without noticing.  */
QEMU_BUILD_BUG_ON(sizeof(TCGOp) != 8 + sizeof(TCGArg) * MAX_OPC_PARAM);

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 16));
typedef struct TCGProfile {
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int temp_count_max;
    int64_t temp_count;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
    int64_t opt_time;
    int64_t restore_count;
    int64_t restore_time;
    int64_t table_op_count[NB_OPS];
} TCGProfile;
struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

    TCGRegSet reserved_regs;
    uint32_t tb_cflags; /* cflags of the current TB */
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    TCGProfile prof;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
#endif

    int gen_next_op_idx;

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_prologue;
    void *code_gen_epilogue;
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */

    /* These structures are private to tcg-target.inc.c.  */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    struct TCGLabelQemuLdst *ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    struct TCGLabelPoolData *pool_labels;
#endif

    TCGLabel *exitreq_label;

    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    TCGOp gen_op_buf[OPC_BUF_SIZE];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};
extern TCGContext tcg_init_ctx;
extern __thread TCGContext *tcg_ctx;
extern TCGv_env cpu_env;
static inline size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

static inline TCGArg temp_arg(TCGTemp *ts)
{
    return (uintptr_t)ts;
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return (TCGTemp *)(uintptr_t)a;
}
/* Using the offset of a temporary, relative to TCGContext, rather than
   its index means that we don't use 0.  That leaves offset 0 free for
   a NULL representation without having to leave index 0 unused.  */
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v;
    TCGTemp *t = (void *)tcg_ctx + o;
    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
    return t;
}

static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
{
    return temp_arg(tcgv_i32_temp(v));
}

static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
{
    return temp_arg(tcgv_i64_temp(v));
}

static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
{
    return temp_arg(tcgv_ptr_temp(v));
}

static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
{
    (void)temp_idx(t); /* trigger embedded assert */
    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
}

static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
{
    return (TCGv_i64)temp_tcgv_i32(t);
}

static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
    return (TCGv_ptr)temp_tcgv_i32(t);
}
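/* These conversions are inverses: for any valid TCGv_i32 v and TCGTemp *ts,
 * temp_tcgv_i32(tcgv_i32_temp(v)) == v and arg_temp(temp_arg(ts)) == ts.
 * Debug builds additionally assert that the offset names a real temp.  */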
#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t));
}

static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
}
#endif
static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
{
    tcg_ctx->gen_op_buf[op_idx].args[arg] = v;
}

/* The number of opcodes emitted so far.  */
static inline int tcg_op_buf_count(void)
{
    return tcg_ctx->gen_next_op_idx;
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    return tcg_op_buf_count() >= OPC_MAX_SIZE;
}
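/* Translator-loop sketch: a front end stops decoding while one more guest
 * instruction (up to MAX_OP_PER_INSTR ops) is still guaranteed to fit
 * ('guest_insns_remain' and 'translate_one_insn' are hypothetical):
 *
 *     while (!tcg_op_buf_full() && guest_insns_remain) {
 *         translate_one_insn();
 *     }
 */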
/* pool based memory allocation */

/* user-mode: tb_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

void tcg_region_init(void);
void tcg_region_reset_all(void);

size_t tcg_code_size(void);
size_t tcg_code_capacity(void);

/* user-mode: Called with tb_lock held.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = tcg_ctx;
    uint8_t *ptr, *ptr_end;

    /* ??? This is a weak placeholder for minimum malloc alignment.  */
    size = QEMU_ALIGN_UP(size, 8);

    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}
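/* Usage sketch: pool memory needs no explicit free; it is recycled
 * wholesale by tcg_pool_reset() when translation of the next TB begins.
 * Backends use it for per-TB scratch data, for example:
 *
 *     TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l));
 */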
void tcg_context_init(TCGContext *s);
void tcg_register_thread(void);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
                                     intptr_t, const char *);

TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);

void tcg_temp_free_i32(TCGv_i32 arg);
void tcg_temp_free_i64(TCGv_i64 arg);
static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    return tcg_temp_new_internal_i32(0);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    return tcg_temp_new_internal_i32(1);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    return tcg_temp_new_internal_i64(0);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    return tcg_temp_new_internal_i64(1);
}
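/* Front-end usage sketch (the tcg_gen_* emitters live in tcg-op.h):
 *
 *     TCGv_i32 t0 = tcg_temp_new_i32();
 *     tcg_gen_movi_i32(t0, 0);        // compute into the temporary
 *     ...
 *     tcg_temp_free_i32(t0);          // dead by the end of the BB
 *
 * Use tcg_temp_local_new_i32() instead if the value must survive a
 * branch to a label within the same TB.  */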
#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);

#define TCG_CT_ALIAS  0x80
#define TCG_CT_IALIAS 0x40
#define TCG_CT_NEWREG 0x20 /* output requires a new register */
#define TCG_CT_REG    0x01
#define TCG_CT_CONST  0x02 /* any constant of register size */

typedef struct TCGArgConstraint {
    uint16_t ct;
    uint8_t alias_index;
    union {
        TCGRegSet regs;
    } u;
} TCGArgConstraint;
#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available.  */
enum {
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x01,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x02,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x04,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x08,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x10,
};
typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
    int *sorted_args;
#if defined(CONFIG_DEBUG_TCG)
    int used;
#endif
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;
#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)
#if UINTPTR_MAX == UINT32_MAX
static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i32 n) { return (TCGv_ptr)n; }
static inline TCGv_i32 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i32)n; }

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
#else
static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i64 n) { return (TCGv_ptr)n; }
static inline TCGv_i64 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i64)n; }

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
#endif
bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);

void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);

void tcg_optimize(TCGContext *s);

/* only used for debugging purposes */
void tcg_dump_ops(TCGContext *s);

TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);

TCGLabel *gen_new_label(void);
/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */
static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */
static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}
/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */
static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */
static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
{
    return tcg_ptr_byte_diff(target, s->code_ptr);
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */
static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}
/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;

/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}
/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK 3
#define TB_EXIT_IDX0 0
#define TB_EXIT_IDX1 1
#define TB_EXIT_REQUESTED 3
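/* Caller-side sketch of decoding the return value described above
 * ('env' and 'tb_ptr' come from the caller's context):
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int tb_exit = ret & TB_EXIT_MASK;
 *     if (tb_exit == TB_EXIT_REQUESTED) {
 *         // e.g. an interrupt is pending; return to the main loop
 *     }
 */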
#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr)
#endif

void tcg_register_jit(void *buf, size_t buf_size);
/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
#endif
uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     TCGMemOpIdx oi, uintptr_t retaddr);
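/* For example, GEN_ATOMIC_HELPER(fetch_add, uint32_t, w_le) declares:
 *
 *     uint32_t helper_atomic_fetch_addw_le_mmu
 *         (CPUArchState *env, target_ulong addr, uint32_t val,
 *          TCGMemOpIdx oi, uintptr_t retaddr);
 */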
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif
GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
#endif /* CONFIG_SOFTMMU */
#ifdef CONFIG_ATOMIC128
#include "qemu/int128.h"

/* These aren't really "proper" helpers because TCG cannot manage Int128.
   However, use the same format as the others, for use by the backends. */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);

Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);

#endif /* CONFIG_ATOMIC128 */

#endif /* TCG_H */