/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op-common.h"
#include "tcg/tcg-mo.h"
#include "exec/translation-block.h"
#include "exec/plugin-gen.h"
#include "tcg-internal.h"

static void check_max_alignment(unsigned a_bits)
{
    /*
     * The requested alignment cannot overlap the TLB flags.
     * FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
     */
    if (tcg_use_softmmu) {
        tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
    }
}

static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
    unsigned a_bits = get_alignment_bits(op);

    check_max_alignment(a_bits);

    /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
    if (a_bits == (op & MO_SIZE)) {
        op = (op & ~MO_AMASK) | MO_ALIGN;
    }

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (is64) {
            op &= ~MO_SIGN;
            break;
        }
        /* fall through */
    default:
        g_assert_not_reached();
    }
    if (st) {
        op &= ~MO_SIGN;
    }

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        op &= ~MO_ATOM_MASK;
        op |= MO_ATOM_NONE;
    }

    return op;
}
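
/*
 * Emit one qemu_ld/st opcode. The value operand is either a single temp
 * (vh == NULL) or a lo/hi pair; on a 32-bit host a 64-bit guest address
 * is likewise passed as a pair of 32-bit temps (see TCGV_LOW/HIGH).
 */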
static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh,
                     TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) {
        if (vh) {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
        } else {
            tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi);
        }
    } else {
        /* See TCGV_LOW/HIGH. */
        TCGTemp *al = addr + HOST_BIG_ENDIAN;
        TCGTemp *ah = addr + !HOST_BIG_ENDIAN;

        if (vh) {
            tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh),
                        temp_arg(al), temp_arg(ah), oi);
        } else {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi);
        }
    }
}
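
/* Expand a 64-bit value into the one- or two-temp form gen_ldst expects. */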
static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
        TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
        gen_ldst(opc, vl, vh, addr, oi);
    } else {
        gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi);
    }
}
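
/*
 * Emit a memory barrier for the ordering required by the guest, less
 * whatever the host provides by default; if nothing remains, emit no
 * barrier at all.
 */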
static void tcg_gen_req_mo(TCGBar type)
{
    type &= tcg_ctx->guest_mo;
    type &= ~TCG_TARGET_DEFAULT_MO;
    if (type) {
        tcg_gen_mb(type | TCG_BAR_SC);
    }
}

/* Only required for loads, where value might overlap addr. */
static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        /* Save a copy of the vaddr for use after a load. */
        TCGv_i64 temp = tcg_temp_ebb_new_i64();
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr));
        } else {
            tcg_gen_mov_i64(temp, temp_tcgv_i64(addr));
        }
        return temp;
    }
#endif
    return NULL;
}

static void
plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi,
                         enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            if (!copy_addr) {
                copy_addr = tcg_temp_ebb_new_i64();
                tcg_gen_extu_i32_i64(copy_addr, temp_tcgv_i32(orig_addr));
            }
            tcg_gen_plugin_mem_cb(copy_addr, info);
            tcg_temp_free_i64(copy_addr);
        } else {
            if (copy_addr) {
                tcg_gen_plugin_mem_cb(copy_addr, info);
                tcg_temp_free_i64(copy_addr);
            } else {
                tcg_gen_plugin_mem_cb(temp_tcgv_i64(orig_addr), info);
            }
        }
    }
#endif
}
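
/*
 * Expand a 32-bit guest load. If the host cannot perform the byte swap
 * as part of the memory operation, the load is issued without MO_BSWAP
 * and the swap is applied to the result afterwards.
 */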
static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input. */
        if ((memop & MO_SSIZE) == MO_SW) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i32;
    } else {
        opc = INDEX_op_qemu_ld_a64_i32;
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN
                                           ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                                           : TCG_BSWAP_IZ | TCG_BSWAP_OZ));
            break;
        case MO_32:
            tcg_gen_bswap32_i32(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_ld_i32_int(val, addr, idx, memop);
}
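
/*
 * Expand a 32-bit guest store; the mirror of the load case above, with
 * an unsupported byte swap performed into a scratch temp before the
 * store is issued.
 */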
static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i32 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 0, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i32();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i32(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st8_a32_i32;
        } else {
            opc = INDEX_op_qemu_st8_a64_i32;
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i32;
        } else {
            opc = INDEX_op_qemu_st_a64_i32;
        }
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i32(swap);
    }
}

void tcg_gen_qemu_st_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_st_i32_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
    TCGOpcode opc;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input. */
        if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i64;
    } else {
        opc = INDEX_op_qemu_ld_a64_i64;
    }
    gen_ldst_i64(opc, val, addr, oi);
    plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        int flags = (orig_memop & MO_SIGN
                     ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                     : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(val, val, flags);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(val, val, flags);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_ld_i64_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i64 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 1, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i64();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(swap, val, 0);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_st_a32_i64;
    } else {
        opc = INDEX_op_qemu_st_a64_i64;
    }
    gen_ldst_i64(opc, val, addr, oi);
    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i64(swap);
    }
}

void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_st_i64_int(val, addr, idx, memop);
}

/*
 * Return true if @mop, without knowledge of the pointer alignment,
 * does not require 16-byte atomicity, and it would be advantageous
 * to avoid a call to a helper function.
 */
static bool use_two_i64_for_i128(MemOp mop)
{
    /* Two softmmu tlb lookups is larger than one function call. */
    if (tcg_use_softmmu) {
        return false;
    }

    /*
     * For user-only, two 64-bit operations may well be smaller than a call.
     * Determine if that would be legal for the requested atomicity.
     */
    switch (mop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
    case MO_ATOM_IFALIGN_PAIR:
        return true;
    case MO_ATOM_IFALIGN:
    case MO_ATOM_SUBALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_WITHIN16_PAIR:
        return false;
    default:
        g_assert_not_reached();
    }
}
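
/*
 * Split a 16-byte MemOp into two 8-byte MemOps, preserving as much of
 * the original alignment and byte-swap requirements as possible;
 * ret[0] describes the first (lower-addressed) half, ret[1] the second.
 */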
static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
{
    MemOp mop_1 = orig, mop_2;

    /* Reduce the size to 64-bit. */
    mop_1 = (mop_1 & ~MO_SIZE) | MO_64;

    /* Retain the alignment constraints of the original. */
    switch (orig & MO_AMASK) {
    case MO_UNALN:
    case MO_ALIGN_2:
    case MO_ALIGN_4:
        mop_2 = mop_1;
        break;
    case MO_ALIGN_8:
        /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        mop_2 = mop_1;
        break;
    case MO_ALIGN:
        /* Second has 8-byte alignment; first has 16-byte alignment. */
        mop_2 = mop_1;
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16;
        break;
    case MO_ALIGN_16:
    case MO_ALIGN_32:
    case MO_ALIGN_64:
        /* Second has 8-byte alignment; first retains original. */
        mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        break;
    default:
        g_assert_not_reached();
    }

    /* Use a memory ordering implemented by the host. */
    if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) {
        mop_1 &= ~MO_BSWAP;
        mop_2 &= ~MO_BSWAP;
    }

    ret[0] = mop_1;
    ret[1] = mop_2;
}
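
/*
 * The out-of-line helpers take the guest address as a TCGv_i64;
 * zero-extend a 32-bit guest address into a fresh temp, which
 * maybe_free_addr64() below releases again.
 */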
static TCGv_i64 maybe_extend_addr64(TCGTemp *addr)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        TCGv_i64 a64 = tcg_temp_ebb_new_i64();
        tcg_gen_extu_i32_i64(a64, temp_tcgv_i32(addr));
        return a64;
    }
    return temp_tcgv_i64(addr);
}

static void maybe_free_addr64(TCGv_i64 a64)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        tcg_temp_free_i64(a64);
    }
}
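
/*
 * Expand a 128-bit guest load one of three ways: a native 16-byte host
 * operation, a pair of 8-byte loads when use_two_i64_for_i128() allows
 * it, or a call to the out-of-line ld_i128 helper.
 */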
static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    MemOpIdx orig_oi;
    TCGv_i64 ext_addr = NULL;
    TCGOpcode opc;

    check_max_alignment(get_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        memop &= ~MO_ATOM_MASK;
        memop |= MO_ATOM_NONE;
    }
    orig_oi = make_memop_idx(memop, idx);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        bool need_bswap = false;
        MemOpIdx oi = orig_oi;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = TCGV128_HIGH(val);
            hi = TCGV128_LOW(val);
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_ld_a32_i128;
        } else {
            opc = INDEX_op_qemu_ld_a64_i128;
        }
        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_gen_bswap64_i64(lo, lo);
            tcg_gen_bswap64_i64(hi, hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y;
        bool need_bswap;

        canonicalize_memop_i128_as_i64(mop, memop);
        need_bswap = (mop[0] ^ memop) & MO_BSWAP;

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_ld_a32_i64;
        } else {
            opc = INDEX_op_qemu_ld_a64_i64;
        }

        /*
         * Since there are no global TCGv_i128, there is no visible state
         * changed if the second load faults.  Load directly into the two
         * subwords.
         */
        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));

        if (need_bswap) {
            tcg_gen_bswap64_i64(x, x);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
        tcg_temp_free_internal(addr_p8);

        if (need_bswap) {
            tcg_gen_bswap64_i64(y, y);
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_ld_i128(val, tcg_env, temp_tcgv_i64(addr),
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
}

void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_ld_i128_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    MemOpIdx orig_oi;
    TCGv_i64 ext_addr = NULL;
    TCGOpcode opc;

    check_max_alignment(get_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);

    /* In serial mode, reduce atomicity. */
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        memop &= ~MO_ATOM_MASK;
        memop |= MO_ATOM_NONE;
    }
    orig_oi = make_memop_idx(memop, idx);

    /* TODO: For now, force 32-bit hosts to use the helper. */

    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        MemOpIdx oi = orig_oi;
        bool need_bswap = false;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = tcg_temp_ebb_new_i64();
            hi = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val));
            tcg_gen_bswap64_i64(hi, TCGV128_LOW(val));
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i128;
        } else {
            opc = INDEX_op_qemu_st_a64_i128;
        }
        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_temp_free_i64(lo);
            tcg_temp_free_i64(hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y, b = NULL;

        canonicalize_memop_i128_as_i64(mop, memop);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i64;
        } else {
            opc = INDEX_op_qemu_st_a64_i64;
        }

        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        if ((mop[0] ^ memop) & MO_BSWAP) {
            b = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(b, x);
            x = b;
        }

        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        if (b) {
            tcg_gen_bswap64_i64(b, y);
            gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx));
            tcg_temp_free_i64(b);
        } else {
            gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
        }
        tcg_temp_free_internal(addr_p8);
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_st_i128(tcg_env, temp_tcgv_i64(addr), val,
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_W);
}

void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_st_i128_int(val, addr, idx, memop);
}

void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    case MO_UL:
    case MO_SL:
        tcg_gen_mov_i32(ret, val);
        break;
    default:
        g_assert_not_reached();
    }
}

void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    case MO_UQ:
    case MO_SQ:
        tcg_gen_mov_i64(ret, val);
        break;
    default:
        g_assert_not_reached();
    }
}

typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64,
                                   TCGv_i128, TCGv_i128, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i32);

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif
#if HAVE_CMPXCHG128
# define WITH_ATOMIC128(X) X,
#else
# define WITH_ATOMIC128(X)
#endif
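
/*
 * Dispatch table for the atomic cmpxchg helpers, indexed by size and
 * endianness (MO_SIZE | MO_BSWAP); the 64- and 128-bit entries exist
 * only when the host supports atomics of that width, hence the
 * conditional wrappers above.
 */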
static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
    WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le)
    WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
};
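
/*
 * Expand cmpxchg without guaranteeing atomicity: load the old value,
 * select between new and old with movcond, and write the chosen value
 * back unconditionally.
 */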
static void tcg_gen_nonatomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                              TCGv_i32 cmpv, TCGv_i32 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);
    tcg_temp_free_i32(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, t1, memop);
    } else {
        tcg_gen_mov_i32(retv, t1);
    }
    tcg_temp_free_i32(t1);
}

void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                       TCGv_i32 cmpv, TCGv_i32 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                           TCGv_i32 cmpv, TCGv_i32 newv,
                                           TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    memop = tcg_canonicalize_memop(memop, 0, 0);
    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, retv, memop);
    }
}

void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                    TCGv_i32 cmpv, TCGv_i32 newv,
                                    TCGArg idx, MemOp memop,
                                    TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_atomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                              TCGv_i64 cmpv, TCGv_i64 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i64 t1, t2;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                          TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
        return;
    }

    t1 = tcg_temp_ebb_new_i64();
    t2 = tcg_temp_ebb_new_i64();

    tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);
    tcg_temp_free_i64(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i64(retv, t1, memop);
    } else {
        tcg_gen_mov_i64(retv, t1);
    }
    tcg_temp_free_i64(t1);
}

void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                       TCGv_i64 cmpv, TCGv_i64 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                           TCGv_i64 cmpv, TCGv_i64 newv,
                                           TCGArg idx, MemOp memop)
{
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_cx_i64 gen;

        memop = tcg_canonicalize_memop(memop, 1, 0);
        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        if (gen) {
            MemOpIdx oi = make_memop_idx(memop, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(tcg_env);

        /*
         * Produce a result for a well-formed opcode stream.  This satisfies
         * liveness for set before used, which happens before this dead code
         * is removed.
         */
        tcg_gen_movi_i64(retv, 0);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                       TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
    } else {
        TCGv_i32 c32 = tcg_temp_ebb_new_i32();
        TCGv_i32 n32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32_int(r32, addr, c32, n32,
                                       idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                    TCGv_i64 cmpv, TCGv_i64 newv,
                                    TCGArg idx, MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_atomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                               TCGv_i128 cmpv, TCGv_i128 newv,
                                               TCGArg idx, MemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* Inline expansion below is simply too large for 32-bit hosts. */
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);

        gen_helper_nonatomic_cmpxchgo(retv, tcg_env, a64, cmpv, newv,
                                      tcg_constant_i32(oi));
        maybe_free_addr64(a64);
    } else {
        TCGv_i128 oldv = tcg_temp_ebb_new_i128();
        TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
        TCGv_i64 z = tcg_constant_i64(0);

        tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop);

        /* Compare i128 */
        tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv));
        tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv));
        tcg_gen_or_i64(t0, t0, t1);

        /* tmpv = equal ? newv : oldv */
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z,
                            TCGV128_LOW(newv), TCGV128_LOW(oldv));
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z,
                            TCGV128_HIGH(newv), TCGV128_HIGH(oldv));

        /* Unconditional writeback. */
        tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop);
        tcg_gen_mov_i128(retv, oldv);

        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i128(tmpv);
        tcg_temp_free_i128(oldv);
    }
}

void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                        TCGv_i128 cmpv, TCGv_i128 newv,
                                        TCGArg idx, MemOp memop,
                                        TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                            TCGv_i128 cmpv, TCGv_i128 newv,
                                            TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i128 gen;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    if (gen) {
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);
        gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
        return;
    }

    gen_helper_exit_atomic(tcg_env);

    /*
     * Produce a result for a well-formed opcode stream.  This satisfies
     * liveness for set before used, which happens before this dead code
     * is removed.
     */
    tcg_gen_movi_i64(TCGV128_LOW(retv), 0);
    tcg_gen_movi_i64(TCGV128_HIGH(retv), 0);
}

void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                     TCGv_i128 cmpv, TCGv_i128 newv,
                                     TCGArg idx, MemOp memop,
                                     TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_atomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}
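
/*
 * Expand one of the read-modify-write operations (add, and, or, ...)
 * without atomicity: load, combine with the operand via @gen, store,
 * and return either the old or the new value as requested.
 */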
static void do_nonatomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop);
    tcg_gen_ext_i32(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

static void do_nonatomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
    TCGv_i64 t2 = tcg_temp_ebb_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop);
    tcg_gen_ext_i64(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_op_i64 gen = table[memop & (MO_SIZE | MO_BSWAP)];

        if (gen) {
            MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(tcg_env);
        /* Produce a result, so that we have a well-formed opcode stream
           with respect to uses of the result in the (dead) code following. */
        tcg_gen_movi_i64(ret, 0);
    } else {
        TCGv_i32 v32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}
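
/*
 * For each operation, define the helper dispatch table plus the _i32 and
 * _i64 entry points; CF_PARALLEL selects between the atomic and the
 * nonatomic expansion at translation time.
 */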
#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = {          \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr,       \
                                     TCGv_i32 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr,       \
                                     TCGv_i64 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
GEN_ATOMIC_HELPER(umax_fetch, umax, 1)

static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER