/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op-common.h"
#include "tcg/tcg-mo.h"
#include "exec/translation-block.h"
#include "exec/plugin-gen.h"
#include "tcg-internal.h"

static void check_max_alignment(unsigned a_bits)
{
    /*
     * The requested alignment cannot overlap the TLB flags.
     * FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
     */
    if (tcg_use_softmmu) {
        tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
    }
}

static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
    unsigned a_bits = get_alignment_bits(op);

    check_max_alignment(a_bits);

    /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
    if (a_bits == (op & MO_SIZE)) {
        op = (op & ~MO_AMASK) | MO_ALIGN;
    }

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (is64) {
            op &= ~MO_SIGN;
            break;
        }
        /* fall through */
    default:
        g_assert_not_reached();
    }
    if (st) {
        op &= ~MO_SIGN;
    }
    return op;
}

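/*
 * Example: MO_32 | MO_ALIGN_4 requests 4-byte alignment for a 4-byte
 * access; since the alignment equals the access size, it canonicalizes
 * to MO_32 | MO_ALIGN.  Likewise a 32-bit sign-extending load into a
 * 32-bit destination (is64 == false) drops MO_SIGN, as the extension
 * would be a no-op.
 */
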
static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh,
                     TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) {
        if (vh) {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
        } else {
            tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi);
        }
    } else {
        /* See TCGV_LOW/HIGH. */
        TCGTemp *al = addr + HOST_BIG_ENDIAN;
        TCGTemp *ah = addr + !HOST_BIG_ENDIAN;

        if (vh) {
            tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh),
                        temp_arg(al), temp_arg(ah), oi);
        } else {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi);
        }
    }
}

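/*
 * Note on gen_ldst above: on a 32-bit host a 64-bit guest address is a
 * pair of adjacent TCGTemps (see TCGV_LOW/HIGH), so indexing the base
 * temp by HOST_BIG_ENDIAN selects the low and high halves in host order.
 */
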
static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
        TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
        gen_ldst(opc, vl, vh, addr, oi);
    } else {
        gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi);
    }
}

static void tcg_gen_req_mo(TCGBar type)
{
    type &= tcg_ctx->guest_mo;
    type &= ~TCG_TARGET_DEFAULT_MO;
    if (type) {
        tcg_gen_mb(type | TCG_BAR_SC);
    }
}

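/*
 * Reading tcg_gen_req_mo (illustrative): keep only the orderings the
 * guest memory model requires, drop those the host provides by default,
 * and emit an explicit barrier only if anything remains.
 */
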
/* Only required for loads, where value might overlap addr. */
static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        /* Save a copy of the vaddr for use after a load. */
        TCGv_i64 temp = tcg_temp_ebb_new_i64();
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr));
        } else {
            tcg_gen_mov_i64(temp, temp_tcgv_i64(addr));
        }
        return temp;
    }
#endif
    return NULL;
}

static void
plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi,
                         enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            if (!copy_addr) {
                copy_addr = tcg_temp_ebb_new_i64();
                tcg_gen_extu_i32_i64(copy_addr, temp_tcgv_i32(orig_addr));
            }
            plugin_gen_empty_mem_callback(copy_addr, info);
            tcg_temp_free_i64(copy_addr);
        } else {
            if (copy_addr) {
                plugin_gen_empty_mem_callback(copy_addr, info);
                tcg_temp_free_i64(copy_addr);
            } else {
                plugin_gen_empty_mem_callback(temp_tcgv_i64(orig_addr), info);
            }
        }
    }
#endif
}

static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input. */
        if ((memop & MO_SSIZE) == MO_SW) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i32;
    } else {
        opc = INDEX_op_qemu_ld_a64_i32;
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN
                                           ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                                           : TCG_BSWAP_IZ | TCG_BSWAP_OZ));
            break;
        case MO_32:
            tcg_gen_bswap32_i32(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_ld_i32_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i32 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 0, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i32();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i32(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st8_a32_i32;
        } else {
            opc = INDEX_op_qemu_st8_a64_i32;
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i32;
        } else {
            opc = INDEX_op_qemu_st_a64_i32;
        }
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i32(swap);
    }
}

void tcg_gen_qemu_st_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_st_i32_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
    TCGOpcode opc;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input. */
        if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i64;
    } else {
        opc = INDEX_op_qemu_ld_a64_i64;
    }
    gen_ldst_i64(opc, val, addr, oi);
    plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        int flags = (orig_memop & MO_SIGN
                     ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                     : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(val, val, flags);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(val, val, flags);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_ld_i64_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i64 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 1, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i64();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(swap, val, 0);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_st_a32_i64;
    } else {
        opc = INDEX_op_qemu_st_a64_i64;
    }
    gen_ldst_i64(opc, val, addr, oi);
    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i64(swap);
    }
}

void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_st_i64_int(val, addr, idx, memop);
}

/*
 * Return true if @mop, without knowledge of the pointer alignment,
 * does not require 16-byte atomicity, and it would be advantageous
 * to avoid a call to a helper function.
 */
static bool use_two_i64_for_i128(MemOp mop)
{
    /* Two softmmu tlb lookups are larger than one function call. */
    if (tcg_use_softmmu) {
        return false;
    }

    /*
     * For user-only, two 64-bit operations may well be smaller than a call.
     * Determine if that would be legal for the requested atomicity.
     */
    switch (mop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
    case MO_ATOM_IFALIGN_PAIR:
        return true;
    case MO_ATOM_IFALIGN:
    case MO_ATOM_SUBALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_WITHIN16_PAIR:
        /* In a serialized context, no atomicity is required. */
        return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL);
    default:
        g_assert_not_reached();
    }
}

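/*
 * For example, a user-only 16-byte access with MO_ATOM_NONE may always
 * be split into two 64-bit operations, while MO_ATOM_IFALIGN may be
 * split only when this TB is not running in parallel with other vCPUs.
 */
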
static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
{
    MemOp mop_1 = orig, mop_2;

    /* Reduce the size to 64-bit. */
    mop_1 = (mop_1 & ~MO_SIZE) | MO_64;

    /* Retain the alignment constraints of the original. */
    switch (orig & MO_AMASK) {
    case MO_UNALN:
    case MO_ALIGN_2:
    case MO_ALIGN_4:
        mop_2 = mop_1;
        break;
    case MO_ALIGN_8:
        /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        mop_2 = mop_1;
        break;
    case MO_ALIGN:
        /* Second has 8-byte alignment; first has 16-byte alignment. */
        mop_2 = mop_1;
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16;
        break;
    case MO_ALIGN_16:
    case MO_ALIGN_32:
    case MO_ALIGN_64:
        /* Second has 8-byte alignment; first retains original. */
        mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        break;
    default:
        g_assert_not_reached();
    }

    /* Use a byte ordering implemented by the host. */
    if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) {
        mop_1 &= ~MO_BSWAP;
        mop_2 &= ~MO_BSWAP;
    }

    ret[0] = mop_1;
    ret[1] = mop_2;
}

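/*
 * Worked example: orig == MO_128 | MO_ALIGN (alignment equal to the
 * 16-byte access size) yields ret[0] == MO_64 | MO_ALIGN_16 and
 * ret[1] == MO_64 | MO_ALIGN, i.e. the first 8-byte half carries the
 * full 16-byte alignment requirement and the second is naturally
 * aligned.
 */
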
static TCGv_i64 maybe_extend_addr64(TCGTemp *addr)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        TCGv_i64 a64 = tcg_temp_ebb_new_i64();
        tcg_gen_extu_i32_i64(a64, temp_tcgv_i32(addr));
        return a64;
    }
    return temp_tcgv_i64(addr);
}

static void maybe_free_addr64(TCGv_i64 a64)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        tcg_temp_free_i64(a64);
    }
}

static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
    TCGv_i64 ext_addr = NULL;
    TCGOpcode opc;

    check_max_alignment(get_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        bool need_bswap = false;
        MemOpIdx oi = orig_oi;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = TCGV128_HIGH(val);
            hi = TCGV128_LOW(val);
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_ld_a32_i128;
        } else {
            opc = INDEX_op_qemu_ld_a64_i128;
        }
        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_gen_bswap64_i64(lo, lo);
            tcg_gen_bswap64_i64(hi, hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y;
        bool need_bswap;

        canonicalize_memop_i128_as_i64(mop, memop);
        need_bswap = (mop[0] ^ memop) & MO_BSWAP;

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_ld_a32_i64;
        } else {
            opc = INDEX_op_qemu_ld_a64_i64;
        }

        /*
         * Since there are no global TCGv_i128, there is no visible state
         * changed if the second load faults.  Load directly into the two
         * subwords.
         */
        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));

        if (need_bswap) {
            tcg_gen_bswap64_i64(x, x);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
        tcg_temp_free_internal(addr_p8);

        if (need_bswap) {
            tcg_gen_bswap64_i64(y, y);
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_ld_i128(val, tcg_env, temp_tcgv_i64(addr),
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
}

void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_ld_i128_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
    TCGv_i64 ext_addr = NULL;
    TCGOpcode opc;

    check_max_alignment(get_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);

    /* TODO: For now, force 32-bit hosts to use the helper. */

    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        MemOpIdx oi = orig_oi;
        bool need_bswap = false;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = tcg_temp_ebb_new_i64();
            hi = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val));
            tcg_gen_bswap64_i64(hi, TCGV128_LOW(val));
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i128;
        } else {
            opc = INDEX_op_qemu_st_a64_i128;
        }
        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_temp_free_i64(lo);
            tcg_temp_free_i64(hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y, b = NULL;

        canonicalize_memop_i128_as_i64(mop, memop);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i64;
        } else {
            opc = INDEX_op_qemu_st_a64_i64;
        }

        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        if ((mop[0] ^ memop) & MO_BSWAP) {
            b = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(b, x);
            x = b;
        }

        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        if (b) {
            tcg_gen_bswap64_i64(b, y);
            gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx));
            tcg_temp_free_i64(b);
        } else {
            gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
        }
        tcg_temp_free_internal(addr_p8);
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_st_i128(tcg_env, temp_tcgv_i64(addr), val,
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_W);
}

void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_st_i128_int(val, addr, idx, memop);
}

void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    case MO_UL:
    case MO_SL:
        tcg_gen_mov_i32(ret, val);
        break;
    default:
        g_assert_not_reached();
    }
}

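/*
 * E.g. tcg_gen_ext_i32(ret, val, MO_SB) sign-extends the low 8 bits of
 * val into ret, MO_UW zero-extends the low 16 bits, and the full-width
 * MO_32 cases reduce to a plain move.
 */
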
void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    case MO_UQ:
    case MO_SQ:
        tcg_gen_mov_i64(ret, val);
        break;
    default:
        g_assert_not_reached();
    }
}

typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64,
                                   TCGv_i128, TCGv_i128, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i32);

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif
#if HAVE_CMPXCHG128
# define WITH_ATOMIC128(X) X,
#else
# define WITH_ATOMIC128(X)
#endif

static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
    WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le)
    WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
};

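/*
 * The table is indexed by (memop & (MO_SIZE | MO_BSWAP)); e.g. a 32-bit
 * little-endian cmpxchg selects gen_helper_atomic_cmpxchgl_le.  Entries
 * guarded by WITH_ATOMIC64/WITH_ATOMIC128 exist only when the host
 * supports that width; the 64-bit and 128-bit users below check for a
 * NULL entry and fall back, while the 32-bit user asserts it is present.
 */
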
static void tcg_gen_nonatomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                              TCGv_i32 cmpv, TCGv_i32 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);
    tcg_temp_free_i32(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, t1, memop);
    } else {
        tcg_gen_mov_i32(retv, t1);
    }
    tcg_temp_free_i32(t1);
}

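/*
 * The movcond above computes t2 = (t1 == t2 ? newv : t1), so the store
 * writes back either the new value (on match) or the value just loaded
 * (no visible change), and t1 provides the returned old value.
 */
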
void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                       TCGv_i32 cmpv, TCGv_i32 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                           TCGv_i32 cmpv, TCGv_i32 newv,
                                           TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    memop = tcg_canonicalize_memop(memop, 0, 0);
    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, retv, memop);
    }
}

void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                    TCGv_i32 cmpv, TCGv_i32 newv,
                                    TCGArg idx, MemOp memop,
                                    TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_atomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                              TCGv_i64 cmpv, TCGv_i64 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i64 t1, t2;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                          TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
        return;
    }

    t1 = tcg_temp_ebb_new_i64();
    t2 = tcg_temp_ebb_new_i64();

    tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);
    tcg_temp_free_i64(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i64(retv, t1, memop);
    } else {
        tcg_gen_mov_i64(retv, t1);
    }
    tcg_temp_free_i64(t1);
}

void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                       TCGv_i64 cmpv, TCGv_i64 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                           TCGv_i64 cmpv, TCGv_i64 newv,
                                           TCGArg idx, MemOp memop)
{
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_cx_i64 gen;

        memop = tcg_canonicalize_memop(memop, 1, 0);
        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        if (gen) {
            MemOpIdx oi = make_memop_idx(memop, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(tcg_env);

        /*
         * Produce a result for a well-formed opcode stream.  This satisfies
         * liveness for set before used, which happens before this dead code
         * is removed.
         */
        tcg_gen_movi_i64(retv, 0);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                       TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
    } else {
        TCGv_i32 c32 = tcg_temp_ebb_new_i32();
        TCGv_i32 n32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32_int(r32, addr, c32, n32,
                                       idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                    TCGv_i64 cmpv, TCGv_i64 newv,
                                    TCGArg idx, MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_atomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                               TCGv_i128 cmpv, TCGv_i128 newv,
                                               TCGArg idx, MemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* Inline expansion below is simply too large for 32-bit hosts. */
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);

        gen_helper_nonatomic_cmpxchgo(retv, tcg_env, a64, cmpv, newv,
                                      tcg_constant_i32(oi));
        maybe_free_addr64(a64);
    } else {
        TCGv_i128 oldv = tcg_temp_ebb_new_i128();
        TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
        TCGv_i64 z = tcg_constant_i64(0);

        tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop);

        /* Compare i128 */
        tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv));
        tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv));
        tcg_gen_or_i64(t0, t0, t1);

        /* tmpv = equal ? newv : oldv */
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z,
                            TCGV128_LOW(newv), TCGV128_LOW(oldv));
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z,
                            TCGV128_HIGH(newv), TCGV128_HIGH(oldv));

        /* Unconditional writeback. */
        tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop);
        tcg_gen_mov_i128(retv, oldv);

        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i128(tmpv);
        tcg_temp_free_i128(oldv);
    }
}

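/*
 * The 128-bit compare folds to one test: t0 = (old.lo ^ cmp.lo) |
 * (old.hi ^ cmp.hi) is zero iff both halves match, so each movcond can
 * condition on t0 == 0 without a second comparison.
 */
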
void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                        TCGv_i128 cmpv, TCGv_i128 newv,
                                        TCGArg idx, MemOp memop,
                                        TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                            TCGv_i128 cmpv, TCGv_i128 newv,
                                            TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i128 gen;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    if (gen) {
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);
        gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
        return;
    }

    gen_helper_exit_atomic(tcg_env);

    /*
     * Produce a result for a well-formed opcode stream.  This satisfies
     * liveness for set before used, which happens before this dead code
     * is removed.
     */
    tcg_gen_movi_i64(TCGV128_LOW(retv), 0);
    tcg_gen_movi_i64(TCGV128_HIGH(retv), 0);
}

void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                     TCGv_i128 cmpv, TCGv_i128 newv,
                                     TCGArg idx, MemOp memop,
                                     TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_atomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

static void do_nonatomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop);
    tcg_gen_ext_i32(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

static void do_nonatomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
    TCGv_i64 t2 = tcg_temp_ebb_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop);
    tcg_gen_ext_i64(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_op_i64 gen = table[memop & (MO_SIZE | MO_BSWAP)];

        if (gen) {
            MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(tcg_env);
        /*
         * Produce a result, so that we have a well-formed opcode stream
         * with respect to uses of the result in the (dead) code following.
         */
        tcg_gen_movi_i64(ret, 0);
    } else {
        TCGv_i32 v32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}

#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = {          \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr,       \
                                     TCGv_i32 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr,       \
                                     TCGv_i64 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
GEN_ATOMIC_HELPER(umax_fetch, umax, 1)

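/*
 * The NEW argument selects the result: 0 returns the value fetched from
 * memory before the operation (fetch_add and friends), 1 returns the
 * newly computed value (add_fetch and friends).
 */
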
/*
 * Exchange expansion: the "operation" ignores the loaded value @a and
 * yields the new value @b, while NEW == 0 selects the old value as the
 * result, giving xchg semantics.
 */
static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER