/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-mo.h"
#include "trace-tcg.h"
#include "trace/mem.h"
/* Reduce the number of ifdefs below.  This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate.  */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW  TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif
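/*
 * The *_link_error declarations are deliberately left undefined.  Every
 * 32-bit-host path below is guarded by a TCG_TARGET_REG_BITS test that the
 * compiler folds to a constant, so on 64-bit hosts all TCGV_LOW/TCGV_HIGH
 * uses are dead code and the dummy symbols are never referenced.  Should a
 * use survive, the build fails at link time instead of silently generating
 * wrong code.
 */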
void tcg_gen_op1(TCGOpcode opc, TCGArg a1)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
}

void tcg_gen_op2(TCGOpcode opc, TCGArg a1, TCGArg a2)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
}

void tcg_gen_op3(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
}

void tcg_gen_op4(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
    op->args[3] = a4;
}

void tcg_gen_op5(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
                 TCGArg a4, TCGArg a5)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
    op->args[3] = a4;
    op->args[4] = a5;
}

void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
                 TCGArg a4, TCGArg a5, TCGArg a6)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
    op->args[3] = a4;
    op->args[4] = a5;
    op->args[5] = a6;
}
void tcg_gen_mb(TCGBar mb_type)
{
    if (tcg_ctx->tb_cflags & CF_PARALLEL) {
        tcg_gen_op1(INDEX_op_mb, mb_type);
    }
}
/* 32 bit ops */

void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_add_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}
void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i32) {
        /* Don't recurse with tcg_gen_neg_i32.  */
        tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg2);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg1);
        tcg_gen_sub_i32(ret, t0, arg2);
        tcg_temp_free_i32(t0);
    }
}
void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}
void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0;
    /* Some cases can be optimized here.  */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i32(ret, 0);
        return;
    case -1:
        tcg_gen_mov_i32(ret, arg1);
        return;
    case 0xff:
        /* Don't recurse with tcg_gen_ext8u_i32.  */
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
            return;
        }
        break;
    case 0xffff:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
            return;
        }
        break;
    }
    t0 = tcg_const_i32(arg2);
    tcg_gen_and_i32(ret, arg1, t0);
    tcg_temp_free_i32(t0);
}
void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here.  */
    if (arg2 == -1) {
        tcg_gen_movi_i32(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}
void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here.  */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) {
        /* Don't recurse with tcg_gen_not_i32.  */
        tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_xor_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}
void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_shl_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_shr_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_sar_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}
void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l));
    }
}

void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_brcond_i32(cond, arg1, t0, l);
        tcg_temp_free_i32(t0);
    }
}
void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
                         TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i32(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i32(ret, 0);
    } else {
        tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond);
    }
}

void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
                          TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0 = tcg_const_i32(arg2);
    tcg_gen_setcond_i32(cond, ret, arg1, t0);
    tcg_temp_free_i32(t0);
}
void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    if (arg2 == 0) {
        tcg_gen_movi_i32(ret, 0);
    } else if (is_power_of_2(arg2)) {
        tcg_gen_shli_i32(ret, arg1, ctz32(arg2));
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_mul_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}
void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_div_i32(ret, arg1, arg2);
    }
}
void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_rem_i32(ret, arg1, arg2);
    }
}
void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_divu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_remu_i32(ret, arg1, arg2);
    }
}
void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_andc_i32) {
        tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_and_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_eqv_i32) {
        tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
    } else {
        tcg_gen_xor_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nand_i32) {
        tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
    } else {
        tcg_gen_and_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nor_i32) {
        tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
    } else {
        tcg_gen_or_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_orc_i32) {
        tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}
void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_clz_i32) {
        tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_clz_i64) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t1, arg1);
        tcg_gen_extu_i32_i64(t2, arg2);
        tcg_gen_addi_i64(t2, t2, 32);
        tcg_gen_clz_i64(t1, t1, t2);
        tcg_gen_extrl_i64_i32(ret, t1);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
        tcg_gen_subi_i32(ret, ret, 32);
    } else {
        gen_helper_clz_i32(ret, arg1, arg2);
    }
}
void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    TCGv_i32 t = tcg_const_i32(arg2);
    tcg_gen_clz_i32(ret, arg1, t);
    tcg_temp_free_i32(t);
}
void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_ctz_i32) {
        tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_ctz_i64) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t1, arg1);
        tcg_gen_extu_i32_i64(t2, arg2);
        tcg_gen_ctz_i64(t1, t1, t2);
        tcg_gen_extrl_i64_i32(ret, t1);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
    } else if (TCG_TARGET_HAS_ctpop_i32
               || TCG_TARGET_HAS_ctpop_i64
               || TCG_TARGET_HAS_clz_i32
               || TCG_TARGET_HAS_clz_i64) {
        TCGv_i32 z, t = tcg_temp_new_i32();

        if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
            tcg_gen_subi_i32(t, arg1, 1);
            tcg_gen_andc_i32(t, t, arg1);
            tcg_gen_ctpop_i32(t, t);
        } else {
            /* Since all non-x86 hosts have clz(0) == 32, don't fight it.  */
            tcg_gen_neg_i32(t, arg1);
            tcg_gen_and_i32(t, t, arg1);
            tcg_gen_clzi_i32(t, t, 32);
            tcg_gen_xori_i32(t, t, 31);
        }
        z = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
        tcg_temp_free_i32(t);
        tcg_temp_free_i32(z);
    } else {
        gen_helper_ctz_i32(ret, arg1, arg2);
    }
}
void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
        /* This equivalence has the advantage of not requiring a fixup.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_subi_i32(t, arg1, 1);
        tcg_gen_andc_i32(t, t, arg1);
        tcg_gen_ctpop_i32(ret, t);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i32 t = tcg_const_i32(arg2);
        tcg_gen_ctz_i32(ret, arg1, t);
        tcg_temp_free_i32(t);
    }
}
void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_clz_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_sari_i32(t, arg, 31);
        tcg_gen_xor_i32(t, t, arg);
        tcg_gen_clzi_i32(t, t, 32);
        tcg_gen_subi_i32(ret, t, 1);
        tcg_temp_free_i32(t);
    } else {
        gen_helper_clrsb_i32(ret, arg);
    }
}
void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
{
    if (TCG_TARGET_HAS_ctpop_i32) {
        tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
    } else if (TCG_TARGET_HAS_ctpop_i64) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, arg1);
        tcg_gen_ctpop_i64(t, t);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);
    } else {
        gen_helper_ctpop_i32(ret, arg1);
    }
}
void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shl_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shr_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}
void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (TCG_TARGET_HAS_rot_i32) {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_rotl_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shli_i32(t0, arg1, arg2);
        tcg_gen_shri_i32(t1, arg1, 32 - arg2);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shr_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shl_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
    }
}
void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
                         unsigned int ofs, unsigned int len)
{
    uint32_t mask;
    TCGv_i32 t1;

    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (len == 32) {
        tcg_gen_mov_i32(ret, arg2);
        return;
    }
    if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
        return;
    }

    mask = (1u << len) - 1;
    t1 = tcg_temp_new_i32();

    if (ofs + len < 32) {
        tcg_gen_andi_i32(t1, arg2, mask);
        tcg_gen_shli_i32(t1, t1, ofs);
    } else {
        tcg_gen_shli_i32(t1, arg2, ofs);
    }
    tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
    tcg_gen_or_i32(ret, ret, t1);

    tcg_temp_free_i32(t1);
}
void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
                           unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (ofs + len == 32) {
        tcg_gen_shli_i32(ret, arg, ofs);
    } else if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
    } else if (TCG_TARGET_HAS_deposit_i32
               && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        TCGv_i32 zero = tcg_const_i32(0);
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
        tcg_temp_free_i32(zero);
    } else {
        /* To help two-operand hosts we prefer to zero-extend first,
           which allows ARG to stay live.  */
        switch (len) {
        case 16:
            if (TCG_TARGET_HAS_ext16u_i32) {
                tcg_gen_ext16u_i32(ret, arg);
                tcg_gen_shli_i32(ret, ret, ofs);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i32) {
                tcg_gen_ext8u_i32(ret, arg);
                tcg_gen_shli_i32(ret, ret, ofs);
                return;
            }
            break;
        }
        /* Otherwise prefer zero-extension over AND for code size.  */
        switch (ofs + len) {
        case 16:
            if (TCG_TARGET_HAS_ext16u_i32) {
                tcg_gen_shli_i32(ret, arg, ofs);
                tcg_gen_ext16u_i32(ret, ret);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i32) {
                tcg_gen_shli_i32(ret, arg, ofs);
                tcg_gen_ext8u_i32(ret, ret);
                return;
            }
            break;
        }
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        tcg_gen_shli_i32(ret, ret, ofs);
    }
}
void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
                         unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 32) {
        tcg_gen_shri_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        return;
    }

    if (TCG_TARGET_HAS_extract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that zero-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_ext16u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_ext8u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    }

    /* ??? Ideally we'd know what values are available for immediate AND.
       Assume that 8 bits are available, plus the special case of 16,
       so that we get ext8u, ext16u.  */
    switch (len) {
    case 1 ... 8: case 16:
        tcg_gen_shri_i32(ret, arg, ofs);
        tcg_gen_andi_i32(ret, ret, (1u << len) - 1);
        break;
    default:
        tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
        tcg_gen_shri_i32(ret, ret, 32 - len);
        break;
    }
}
void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
                          unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 32) {
        tcg_gen_sari_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        switch (len) {
        case 16:
            tcg_gen_ext16s_i32(ret, arg);
            return;
        case 8:
            tcg_gen_ext8s_i32(ret, arg);
            return;
        }
    }

    if (TCG_TARGET_HAS_sextract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that sign-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_ext16s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_ext8s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    }
    switch (len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext16s_i32(ret, ret);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext8s_i32(ret, ret);
            return;
        }
        break;
    }

    tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
    tcg_gen_sari_i32(ret, ret, 32 - len);
}
void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
                         TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i32(ret, v1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_mov_i32(ret, v2);
    } else if (TCG_TARGET_HAS_movcond_i32) {
        tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cond, t0, c1, c2);
        tcg_gen_neg_i32(t0, t0);
        tcg_gen_and_i32(t1, v1, t0);
        tcg_gen_andc_i32(ret, v2, t0);
        tcg_gen_or_i32(ret, ret, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}
void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_add_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_sub2_i32) {
        tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_sub_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i32) {
        tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_muluh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_muls2_i32) {
        tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_mulsh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative inputs.  */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_sari_i32(t3, arg2, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_and_i32(t3, t3, arg1);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_sub_i32(rh, rh, t3);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_ext_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative input for the signed arg1.  */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 24);
        tcg_gen_sari_i32(ret, ret, 24);
    }
}

void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 16);
        tcg_gen_sari_i32(ret, ret, 16);
    }
}

void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffffu);
    }
}
/* Note: we assume the two high bytes are set to zero */
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap16_i32) {
        tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();

        tcg_gen_ext8u_i32(t0, arg);
        tcg_gen_shli_i32(t0, t0, 8);
        tcg_gen_shri_i32(ret, arg, 8);
        tcg_gen_or_i32(ret, ret, t0);
        tcg_temp_free_i32(t0);
    }
}
void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap32_i32) {
        tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_const_i32(0x00ff00ff);

                                        /* arg = abcd */
        tcg_gen_shri_i32(t0, arg, 8);   /*  t0 = .abc */
        tcg_gen_and_i32(t1, arg, t2);   /*  t1 = .b.d */
        tcg_gen_and_i32(t0, t0, t2);    /*  t0 = .a.c */
        tcg_gen_shli_i32(t1, t1, 8);    /*  t1 = b.d. */
        tcg_gen_or_i32(ret, t0, t1);    /* ret = badc */

        tcg_gen_shri_i32(t0, ret, 16);  /*  t0 = ..ba */
        tcg_gen_shli_i32(t1, ret, 16);  /*  t1 = dc.. */
        tcg_gen_or_i32(ret, t0, t1);    /* ret = dcba */

        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
    }
}
void tcg_gen_smin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, a, b);
}

void tcg_gen_umin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, a, b);
}

void tcg_gen_smax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, b, a);
}

void tcg_gen_umax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, b, a);
}
/* 64-bit ops */

#if TCG_TARGET_REG_BITS == 32
/* These are all inline for TCG_TARGET_REG_BITS == 64.  */

void tcg_gen_discard_i64(TCGv_i64 arg)
{
    tcg_gen_discard_i32(TCGV_LOW(arg));
    tcg_gen_discard_i32(TCGV_HIGH(arg));
}
void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
    tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
}

void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
{
    tcg_gen_movi_i32(TCGV_LOW(ret), arg);
    tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32);
}
void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}
void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    /* Since arg2 and ret have different types,
       they cannot be the same temporary */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
#else
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4);
#endif
}

void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
#else
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4);
#endif
}
void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shl_i64(ret, arg1, arg2);
}

void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shr_i64(ret, arg1, arg2);
}

void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_sar_i64(ret, arg1, arg2);
}
void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0;
    TCGv_i32 t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i32();

    tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0),
                      TCGV_LOW(arg1), TCGV_LOW(arg2));

    tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
    tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), TCGV_LOW(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);

    tcg_gen_mov_i64(ret, t0);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i32(t1);
}
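/* Schoolbook multiply on a 32-bit host: with x = xh:xl and y = yh:yl,
   x * y mod 2^64 == xl*yl + ((xl*yh + xh*yl) << 32).  The xh*yh term
   only influences bits 64 and up and is dropped; mulu2 supplies the
   full 64-bit xl*yl product, and the two cross products contribute
   only their low 32 bits to the high word.  */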
#endif /* TCG_TARGET_REG_BITS == 32 */
void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_add_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) {
        /* Don't recurse with tcg_gen_neg_i64.  */
        tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg2);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg1);
        tcg_gen_sub_i64(ret, t0, arg2);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}
void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0;

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }

    /* Some cases can be optimized here.  */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i64(ret, 0);
        return;
    case -1:
        tcg_gen_mov_i64(ret, arg1);
        return;
    case 0xff:
        /* Don't recurse with tcg_gen_ext8u_i64.  */
        if (TCG_TARGET_HAS_ext8u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
            return;
        }
        break;
    case 0xffff:
        if (TCG_TARGET_HAS_ext16u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
            return;
        }
        break;
    case 0xffffffffu:
        if (TCG_TARGET_HAS_ext32u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
            return;
        }
        break;
    }
    t0 = tcg_const_i64(arg2);
    tcg_gen_and_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}
void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_ori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }
    /* Some cases can be optimized here.  */
    if (arg2 == -1) {
        tcg_gen_movi_i64(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_or_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_xori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_xori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }
    /* Some cases can be optimized here.  */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) {
        /* Don't recurse with tcg_gen_not_i64.  */
        tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_xor_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}
static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
                                      unsigned c, bool right, bool arith)
{
    tcg_debug_assert(c < 64);
    if (c == 0) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
        tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
    } else if (c >= 32) {
        c -= 32;
        if (right) {
            if (arith) {
                tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
            } else {
                tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            }
        } else {
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
            tcg_gen_movi_i32(TCGV_LOW(ret), 0);
        }
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        if (right) {
            tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
            if (arith) {
                tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
            } else {
                tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
            }
            tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
            tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
            tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
        } else {
            tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
            /* Note: ret can be the same as arg1, so we use t1 */
            tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
            tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
            tcg_gen_mov_i32(TCGV_LOW(ret), t1);
        }
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}
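/* For counts below 32 each result word mixes bits of both source words:
   a right shift by c, for instance, must feed the low c bits of the high
   word into the top of the low word, which the extra left shift by
   32 - c captures.  Counts of 32 or more collapse to a single 32-bit
   shift by c - 32 plus filling the vacated word with zeros or sign
   bits.  */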
void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shl_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shr_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_sar_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}
void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
                              TCGV_HIGH(arg1), TCGV_LOW(arg2),
                              TCGV_HIGH(arg2), cond, label_arg(l));
        } else {
            tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond,
                              label_arg(l));
        }
    }
}

void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_brcond_i64(cond, arg1, t0, l);
        tcg_temp_free_i64(t0);
    }
}
void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
                         TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i64(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
                             TCGV_LOW(arg1), TCGV_HIGH(arg1),
                             TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        } else {
            tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond);
        }
    }
}

void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
                          TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_setcond_i64(cond, ret, arg1, t0);
    tcg_temp_free_i64(t0);
}
void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (arg2 == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else if (is_power_of_2(arg2)) {
        tcg_gen_shli_i64(ret, arg1, ctz64(arg2));
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_mul_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}
void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_div_i64) {
        tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
        tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_div_i64(ret, arg1, arg2);
    }
}

void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
        tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_rem_i64(ret, arg1, arg2);
    }
}

void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_div_i64) {
        tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_movi_i64(t0, 0);
        tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_divu_i64(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_movi_i64(t0, 0);
        tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_remu_i64(ret, arg1, arg2);
    }
}
void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext8s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 56);
        tcg_gen_sari_i64(ret, ret, 56);
    }
}

void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext16s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 48);
        tcg_gen_sari_i64(ret, ret, 48);
    }
}

void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext32s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 32);
        tcg_gen_sari_i64(ret, ret, 32);
    }
}

void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext8u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext16u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffffu);
    }
}

void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext32u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffffffffu);
    }
}
/* Note: we assume the six high bytes are set to zero */
void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_bswap16_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap16_i64, ret, arg);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();

        tcg_gen_ext8u_i64(t0, arg);
        tcg_gen_shli_i64(t0, t0, 8);
        tcg_gen_shri_i64(ret, arg, 8);
        tcg_gen_or_i64(ret, ret, t0);
        tcg_temp_free_i64(t0);
    }
}

/* Note: we assume the four high bytes are set to zero */
void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_bswap32_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_const_i64(0x00ff00ff);

                                        /* arg = ....abcd */
        tcg_gen_shri_i64(t0, arg, 8);   /*  t0 = .....abc */
        tcg_gen_and_i64(t1, arg, t2);   /*  t1 = .....b.d */
        tcg_gen_and_i64(t0, t0, t2);    /*  t0 = .....a.c */
        tcg_gen_shli_i64(t1, t1, 8);    /*  t1 = ....b.d. */
        tcg_gen_or_i64(ret, t0, t1);    /* ret = ....badc */

        tcg_gen_shli_i64(t1, ret, 48);  /*  t1 = dc...... */
        tcg_gen_shri_i64(t0, ret, 16);  /*  t0 = ......ba */
        tcg_gen_shri_i64(t1, t1, 32);   /*  t1 = ....dc.. */
        tcg_gen_or_i64(ret, t0, t1);    /* ret = ....dcba */

        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
    }
}
void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();

        tcg_gen_bswap32_i32(t0, TCGV_LOW(arg));
        tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg));
        tcg_gen_mov_i32(TCGV_LOW(ret), t1);
        tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else if (TCG_TARGET_HAS_bswap64_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

                                        /* arg = abcdefgh */
        tcg_gen_movi_i64(t2, 0x00ff00ff00ff00ffull);
        tcg_gen_shri_i64(t0, arg, 8);   /*  t0 = .abcdefg */
        tcg_gen_and_i64(t1, arg, t2);   /*  t1 = .b.d.f.h */
        tcg_gen_and_i64(t0, t0, t2);    /*  t0 = .a.c.e.g */
        tcg_gen_shli_i64(t1, t1, 8);    /*  t1 = b.d.f.h. */
        tcg_gen_or_i64(ret, t0, t1);    /* ret = badcfehg */

        tcg_gen_movi_i64(t2, 0x0000ffff0000ffffull);
        tcg_gen_shri_i64(t0, ret, 16);  /*  t0 = ..badcfe */
        tcg_gen_and_i64(t1, ret, t2);   /*  t1 = ..dc..hg */
        tcg_gen_and_i64(t0, t0, t2);    /*  t0 = ..ba..fe */
        tcg_gen_shli_i64(t1, t1, 16);   /*  t1 = dc..hg.. */
        tcg_gen_or_i64(ret, t0, t1);    /* ret = dcbahgfe */

        tcg_gen_shri_i64(t0, ret, 32);  /*  t0 = ....dcba */
        tcg_gen_shli_i64(t1, ret, 32);  /*  t1 = hgfe.... */
        tcg_gen_or_i64(ret, t0, t1);    /* ret = hgfedcba */

        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
    }
}
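/* Byte reversal by halving: swap adjacent bytes, then adjacent 16-bit
   pairs, then the two 32-bit halves.  Each level is a shift pair plus
   masking, with the masks (0x00ff00ff..., 0x0000ffff...) selecting the
   lanes being exchanged, as the abcdefgh diagrams trace.  */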
void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
    } else if (TCG_TARGET_HAS_not_i64) {
        tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
    } else {
        tcg_gen_xori_i64(ret, arg, -1);
    }
}

void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_andc_i64) {
        tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_not_i64(t0, arg2);
        tcg_gen_and_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_eqv_i64) {
        tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2);
    } else {
        tcg_gen_xor_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_nand_i64) {
        tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
    } else {
        tcg_gen_and_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_nor_i64) {
        tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2);
    } else {
        tcg_gen_or_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_orc_i64) {
        tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_not_i64(t0, arg2);
        tcg_gen_or_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}
void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_clz_i64) {
        tcg_gen_op3_i64(INDEX_op_clz_i64, ret, arg1, arg2);
    } else {
        gen_helper_clz_i64(ret, arg1, arg2);
    }
}

void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32
        && TCG_TARGET_HAS_clz_i32
        && arg2 <= 0xffffffffu) {
        TCGv_i32 t = tcg_const_i32((uint32_t)arg2 - 32);
        tcg_gen_clz_i32(t, TCGV_LOW(arg1), t);
        tcg_gen_addi_i32(t, t, 32);
        tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t);
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i64 t = tcg_const_i64(arg2);
        tcg_gen_clz_i64(ret, arg1, t);
        tcg_temp_free_i64(t);
    }
}
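/* Chained two-word clz without a 64-bit primitive: count the low word
   first with arg2 - 32 as its default, add 32 so the intermediate is
   expressed in 64-bit terms, then use it as the default for the high
   word's clz.  A nonzero high word wins outright; a zero high word falls
   through to the low-word count or, if that was zero too, the caller's
   default arg2.  */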
void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_ctz_i64) {
        tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) {
        TCGv_i64 z, t = tcg_temp_new_i64();

        if (TCG_TARGET_HAS_ctpop_i64) {
            tcg_gen_subi_i64(t, arg1, 1);
            tcg_gen_andc_i64(t, t, arg1);
            tcg_gen_ctpop_i64(t, t);
        } else {
            /* Since all non-x86 hosts have clz(0) == 64, don't fight it.  */
            tcg_gen_neg_i64(t, arg1);
            tcg_gen_and_i64(t, t, arg1);
            tcg_gen_clzi_i64(t, t, 64);
            tcg_gen_xori_i64(t, t, 63);
        }
        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    } else {
        gen_helper_ctz_i64(ret, arg1, arg2);
    }
}
void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32
        && TCG_TARGET_HAS_ctz_i32
        && arg2 <= 0xffffffffu) {
        TCGv_i32 t32 = tcg_const_i32((uint32_t)arg2 - 32);
        tcg_gen_ctz_i32(t32, TCGV_HIGH(arg1), t32);
        tcg_gen_addi_i32(t32, t32, 32);
        tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32);
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        tcg_temp_free_i32(t32);
    } else if (!TCG_TARGET_HAS_ctz_i64
               && TCG_TARGET_HAS_ctpop_i64
               && arg2 == 64) {
        /* This equivalence has the advantage of not requiring a fixup.  */
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_subi_i64(t, arg1, 1);
        tcg_gen_andc_i64(t, t, arg1);
        tcg_gen_ctpop_i64(ret, t);
        tcg_temp_free_i64(t);
    } else {
        TCGv_i64 t64 = tcg_const_i64(arg2);
        tcg_gen_ctz_i64(ret, arg1, t64);
        tcg_temp_free_i64(t64);
    }
}
void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_sari_i64(t, arg, 63);
        tcg_gen_xor_i64(t, t, arg);
        tcg_gen_clzi_i64(t, t, 64);
        tcg_gen_subi_i64(ret, t, 1);
        tcg_temp_free_i64(t);
    } else {
        gen_helper_clrsb_i64(ret, arg);
    }
}

void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1)
{
    if (TCG_TARGET_HAS_ctpop_i64) {
        tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1);
    } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32) {
        tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
        tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
        tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else {
        gen_helper_ctpop_i64(ret, arg1);
    }
}
void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rot_i64) {
        tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();
        tcg_gen_shl_i64(t0, arg1, arg2);
        tcg_gen_subfi_i64(t1, 64, arg2);
        tcg_gen_shr_i64(t1, arg1, t1);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else if (TCG_TARGET_HAS_rot_i64) {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_rotl_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();
        tcg_gen_shli_i64(t0, arg1, arg2);
        tcg_gen_shri_i64(t1, arg1, 64 - arg2);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rot_i64) {
        tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();
        tcg_gen_shr_i64(t0, arg1, arg2);
        tcg_gen_subfi_i64(t1, 64, arg2);
        tcg_gen_shl_i64(t1, arg1, t1);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        tcg_gen_rotli_i64(ret, arg1, 64 - arg2);
    }
}
void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
                         unsigned int ofs, unsigned int len)
{
    uint64_t mask;
    TCGv_i64 t1;

    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    if (len == 64) {
        tcg_gen_mov_i64(ret, arg2);
        return;
    }
    if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) {
        tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        if (ofs >= 32) {
            tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1),
                                TCGV_LOW(arg2), ofs - 32, len);
            tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
            return;
        }
        if (ofs + len <= 32) {
            tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(arg1),
                                TCGV_LOW(arg2), ofs, len);
            tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
            return;
        }
    }

    mask = (1ull << len) - 1;
    t1 = tcg_temp_new_i64();

    if (ofs + len < 64) {
        tcg_gen_andi_i64(t1, arg2, mask);
        tcg_gen_shli_i64(t1, t1, ofs);
    } else {
        tcg_gen_shli_i64(t1, arg2, ofs);
    }
    tcg_gen_andi_i64(ret, arg1, ~(mask << ofs));
    tcg_gen_or_i64(ret, ret, t1);

    tcg_temp_free_i64(t1);
}
void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
                           unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    if (ofs + len == 64) {
        tcg_gen_shli_i64(ret, arg, ofs);
    } else if (ofs == 0) {
        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
    } else if (TCG_TARGET_HAS_deposit_i64
               && TCG_TARGET_deposit_i64_valid(ofs, len)) {
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len);
        tcg_temp_free_i64(zero);
    } else {
        if (TCG_TARGET_REG_BITS == 32) {
            if (ofs >= 32) {
                tcg_gen_deposit_z_i32(TCGV_HIGH(ret), TCGV_LOW(arg),
                                      ofs - 32, len);
                tcg_gen_movi_i32(TCGV_LOW(ret), 0);
                return;
            }
            if (ofs + len <= 32) {
                tcg_gen_deposit_z_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
                tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
                return;
            }
        }
        /* To help two-operand hosts we prefer to zero-extend first,
           which allows ARG to stay live.  */
        switch (len) {
        case 32:
            if (TCG_TARGET_HAS_ext32u_i64) {
                tcg_gen_ext32u_i64(ret, arg);
                tcg_gen_shli_i64(ret, ret, ofs);
                return;
            }
            break;
        case 16:
            if (TCG_TARGET_HAS_ext16u_i64) {
                tcg_gen_ext16u_i64(ret, arg);
                tcg_gen_shli_i64(ret, ret, ofs);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i64) {
                tcg_gen_ext8u_i64(ret, arg);
                tcg_gen_shli_i64(ret, ret, ofs);
                return;
            }
            break;
        }
        /* Otherwise prefer zero-extension over AND for code size.  */
        switch (ofs + len) {
        case 32:
            if (TCG_TARGET_HAS_ext32u_i64) {
                tcg_gen_shli_i64(ret, arg, ofs);
                tcg_gen_ext32u_i64(ret, ret);
                return;
            }
            break;
        case 16:
            if (TCG_TARGET_HAS_ext16u_i64) {
                tcg_gen_shli_i64(ret, arg, ofs);
                tcg_gen_ext16u_i64(ret, ret);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i64) {
                tcg_gen_shli_i64(ret, arg, ofs);
                tcg_gen_ext8u_i64(ret, ret);
                return;
            }
            break;
        }
        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
        tcg_gen_shli_i64(ret, ret, ofs);
    }
}
void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
                         unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 64) {
        tcg_gen_shri_i64(ret, arg, 64 - len);
        return;
    }
    if (ofs == 0) {
        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        /* Look for a 32-bit extract within one of the two words.  */
        if (ofs >= 32) {
            tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            return;
        }
        if (ofs + len <= 32) {
            tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            return;
        }
        /* The field is split across two words.  One double-word
           shift is better than two double-word shifts.  */
        goto do_shift_and;
    }

    if (TCG_TARGET_HAS_extract_i64
        && TCG_TARGET_extract_i64_valid(ofs, len)) {
        tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
        return;
    }

    /* Assume that zero-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 32:
        if (TCG_TARGET_HAS_ext32u_i64) {
            tcg_gen_ext32u_i64(ret, arg);
            tcg_gen_shri_i64(ret, ret, ofs);
            return;
        }
        break;
    case 16:
        if (TCG_TARGET_HAS_ext16u_i64) {
            tcg_gen_ext16u_i64(ret, arg);
            tcg_gen_shri_i64(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8u_i64) {
            tcg_gen_ext8u_i64(ret, arg);
            tcg_gen_shri_i64(ret, ret, ofs);
            return;
        }
        break;
    }

    /* ??? Ideally we'd know what values are available for immediate AND.
       Assume that 8 bits are available, plus the special cases of 16 and 32,
       so that we get ext8u, ext16u, and ext32u.  */
    switch (len) {
    case 1 ... 8: case 16: case 32:
    do_shift_and:
        tcg_gen_shri_i64(ret, arg, ofs);
        tcg_gen_andi_i64(ret, ret, (1ull << len) - 1);
        break;
    default:
        tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
        tcg_gen_shri_i64(ret, ret, 64 - len);
        break;
    }
}

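/* Illustrative use (an editor's sketch, not from the original source):
 *     tcg_gen_extract_i64(dst, src, 8, 16);
 * yields dst = (src >> 8) & 0xffff, i.e. an unsigned 16-bit field taken
 * from bit 8, expanded via the extract opcode, a zero-extension, or a
 * shift/mask pair according to what the host supports.  */
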
void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
                          unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    /* Canonicalize certain special cases, even if sextract is supported.  */
    if (ofs + len == 64) {
        tcg_gen_sari_i64(ret, arg, 64 - len);
        return;
    }
    if (ofs == 0) {
        switch (len) {
        case 32:
            tcg_gen_ext32s_i64(ret, arg);
            return;
        case 16:
            tcg_gen_ext16s_i64(ret, arg);
            return;
        case 8:
            tcg_gen_ext8s_i64(ret, arg);
            return;
        }
    }

    if (TCG_TARGET_REG_BITS == 32) {
        /* Look for a 32-bit extract within one of the two words.  */
        if (ofs >= 32) {
            tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
        } else if (ofs + len <= 32) {
            tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
        } else if (ofs == 0) {
            tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
            tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32);
            return;
        } else if (len > 32) {
            TCGv_i32 t = tcg_temp_new_i32();
            /* Extract the bits for the high word normally.  The part of
               the field in the high word starts at bit OFS of that word,
               since the field straddles the word boundary here.  */
            tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs, len - 32);
            /* Shift the field down for the low part.  */
            tcg_gen_shri_i64(ret, arg, ofs);
            /* Overwrite the shift into the high part.  */
            tcg_gen_mov_i32(TCGV_HIGH(ret), t);
            tcg_temp_free_i32(t);
            return;
        } else {
            /* Shift the field down for the low part, such that the
               field sits at the MSB.  */
            tcg_gen_shri_i64(ret, arg, ofs + len - 32);
            /* Shift the field down from the MSB, sign extending.  */
            tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_LOW(ret), 32 - len);
        }
        /* Sign-extend the field from 32 bits.  */
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
        return;
    }

    if (TCG_TARGET_HAS_sextract_i64
        && TCG_TARGET_extract_i64_valid(ofs, len)) {
        tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len);
        return;
    }

    /* Assume that sign-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 32:
        if (TCG_TARGET_HAS_ext32s_i64) {
            tcg_gen_ext32s_i64(ret, arg);
            tcg_gen_sari_i64(ret, ret, ofs);
            return;
        }
        break;
    case 16:
        if (TCG_TARGET_HAS_ext16s_i64) {
            tcg_gen_ext16s_i64(ret, arg);
            tcg_gen_sari_i64(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i64) {
            tcg_gen_ext8s_i64(ret, arg);
            tcg_gen_sari_i64(ret, ret, ofs);
            return;
        }
        break;
    }
    switch (len) {
    case 32:
        if (TCG_TARGET_HAS_ext32s_i64) {
            tcg_gen_shri_i64(ret, arg, ofs);
            tcg_gen_ext32s_i64(ret, ret);
            return;
        }
        break;
    case 16:
        if (TCG_TARGET_HAS_ext16s_i64) {
            tcg_gen_shri_i64(ret, arg, ofs);
            tcg_gen_ext16s_i64(ret, ret);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i64) {
            tcg_gen_shri_i64(ret, arg, ofs);
            tcg_gen_ext8s_i64(ret, ret);
            return;
        }
        break;
    }
    tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
    tcg_gen_sari_i64(ret, ret, 64 - len);
}

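/* Illustrative use (an editor's sketch, not from the original source):
 *     tcg_gen_sextract_i64(dst, src, 8, 16);
 * yields dst = (int16_t)(src >> 8) sign-extended to 64 bits; the closing
 * shl/sar pair is the generic fallback, which places the field at the
 * MSB and shifts it back down with sign replication.  */
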
void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
                         TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i64(ret, v1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_mov_i64(ret, v2);
    } else if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0,
                         TCGV_LOW(c1), TCGV_HIGH(c1),
                         TCGV_LOW(c2), TCGV_HIGH(c2), cond);

        if (TCG_TARGET_HAS_movcond_i32) {
            tcg_gen_movi_i32(t1, 0);
            tcg_gen_movcond_i32(TCG_COND_NE, TCGV_LOW(ret), t0, t1,
                                TCGV_LOW(v1), TCGV_LOW(v2));
            tcg_gen_movcond_i32(TCG_COND_NE, TCGV_HIGH(ret), t0, t1,
                                TCGV_HIGH(v1), TCGV_HIGH(v2));
        } else {
            tcg_gen_neg_i32(t0, t0);

            tcg_gen_and_i32(t1, TCGV_LOW(v1), t0);
            tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(v2), t0);
            tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t1);

            tcg_gen_and_i32(t1, TCGV_HIGH(v1), t0);
            tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(v2), t0);
            tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t1);
        }
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else if (TCG_TARGET_HAS_movcond_i64) {
        tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond, t0, c1, c2);
        tcg_gen_neg_i64(t0, t0);
        tcg_gen_and_i64(t1, v1, t0);
        tcg_gen_andc_i64(ret, v2, t0);
        tcg_gen_or_i64(ret, ret, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

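/* The generic fallback above relies on setcond producing exactly 0 or 1:
 * negating that value gives an all-zeros or all-ones mask m, and
 *     ret = (v1 & m) | (v2 & ~m)
 * selects v1 when the condition held and v2 otherwise, with no branches
 * in the generated code.  */
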
void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
                      TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    if (TCG_TARGET_HAS_add2_i64) {
        tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_add_i64(t0, al, bl);
        tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al);
        tcg_gen_add_i64(rh, ah, bh);
        tcg_gen_add_i64(rh, rh, t1);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

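/* The carry out of the low-half addition is recovered arithmetically:
 * for unsigned t0 = al + bl (mod 2^64), overflow occurred iff t0 < al,
 * which is exactly the TCG_COND_LTU setcond above; the 0/1 result is
 * then added into the high half.  Likewise for sub2 below, the borrow
 * is al < bl.  */
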
void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
                      TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    if (TCG_TARGET_HAS_sub2_i64) {
        tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_sub_i64(t0, al, bl);
        tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl);
        tcg_gen_sub_i64(rh, ah, bh);
        tcg_gen_sub_i64(rh, rh, t1);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i64) {
        tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_muluh_i64) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
        tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t);
        tcg_temp_free_i64(t);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mul_i64(t0, arg1, arg2);
        gen_helper_muluh_i64(rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
    }
}

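/* The three strategies above, in order of preference: a single mulu2
 * opcode producing both halves; separate mul (low half) and muluh
 * (high half) opcodes; or a mul for the low half plus a call to the
 * out-of-line muluh helper when the host provides neither.  */
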
void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_muls2_i64) {
        tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_mulsh_i64) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
        tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t);
        tcg_temp_free_i64(t);
    } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
        /* Adjust for negative inputs.  */
        tcg_gen_sari_i64(t2, arg1, 63);
        tcg_gen_sari_i64(t3, arg2, 63);
        tcg_gen_and_i64(t2, t2, arg2);
        tcg_gen_and_i64(t3, t3, arg1);
        tcg_gen_sub_i64(rh, t1, t2);
        tcg_gen_sub_i64(rh, rh, t3);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mul_i64(t0, arg1, arg2);
        gen_helper_mulsh_i64(rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
    }
}

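/* The mulu2-based branch uses the identity relating the signed and
 * unsigned high parts of a 64x64->128 multiply:
 *     mulsh(a, b) = muluh(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
 * The two sar/and pairs above compute exactly those conditional
 * subtrahends without branches.  */
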
void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
    /* Adjust for negative input for the signed arg1.  */
    tcg_gen_sari_i64(t2, arg1, 63);
    tcg_gen_and_i64(t2, t2, arg2);
    tcg_gen_sub_i64(rh, t1, t2);
    tcg_gen_mov_i64(rl, t0);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

void tcg_gen_smin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, a, b);
}

void tcg_gen_umin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, a, b);
}

void tcg_gen_smax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, b, a);
}

void tcg_gen_umax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, b, a);
}

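/* Illustrative note (an editor's sketch, not from the original source):
 * each of these expands to a single movcond, so e.g.
 *     tcg_gen_smax_i64(dst, x, y);
 * is equivalent to dst = (x < y ? y : x) with a signed comparison, and
 * generates no branches.  */
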
/* Size changing operations.  */

void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(ret, TCGV_LOW(arg));
    } else if (TCG_TARGET_HAS_extrl_i64_i32) {
        tcg_gen_op2(INDEX_op_extrl_i64_i32,
                    tcgv_i32_arg(ret), tcgv_i64_arg(arg));
    } else {
        tcg_gen_mov_i32(ret, (TCGv_i32)arg);
    }
}

void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
    } else if (TCG_TARGET_HAS_extrh_i64_i32) {
        tcg_gen_op2(INDEX_op_extrh_i64_i32,
                    tcgv_i32_arg(ret), tcgv_i64_arg(arg));
    } else {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_shri_i64(t, arg, 32);
        tcg_gen_mov_i32(ret, (TCGv_i32)t);
        tcg_temp_free_i64(t);
    }
}

void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), arg);
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else {
        tcg_gen_op2(INDEX_op_extu_i32_i64,
                    tcgv_i64_arg(ret), tcgv_i32_arg(arg));
    }
}

void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), arg);
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else {
        tcg_gen_op2(INDEX_op_ext_i32_i64,
                    tcgv_i64_arg(ret), tcgv_i32_arg(arg));
    }
}

void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
{
    TCGv_i64 tmp;

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(dest), low);
        tcg_gen_mov_i32(TCGV_HIGH(dest), high);
        return;
    }

    tmp = tcg_temp_new_i64();
    /* These extensions are only needed for type correctness.
       We may be able to do better given target specific information.  */
    tcg_gen_extu_i32_i64(tmp, high);
    tcg_gen_extu_i32_i64(dest, low);
    /* If deposit is available, use it.  Otherwise use the extra
       knowledge that we have of the zero-extensions above.  */
    if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) {
        tcg_gen_deposit_i64(dest, dest, tmp, 32, 32);
    } else {
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_or_i64(dest, dest, tmp);
    }
    tcg_temp_free_i64(tmp);
}

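/* Illustrative use (an editor's sketch, not from the original source):
 *     tcg_gen_concat_i32_i64(dst64, lo32, hi32);
 * produces dst64 = ((uint64_t)hi32 << 32) | (uint32_t)lo32, either via
 * a single deposit or via the explicit shift/or path above.  */
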
void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(lo, TCGV_LOW(arg));
        tcg_gen_mov_i32(hi, TCGV_HIGH(arg));
    } else {
        tcg_gen_extrl_i64_i32(lo, arg);
        tcg_gen_extrh_i64_i32(hi, arg);
    }
}

void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
{
    tcg_gen_ext32u_i64(lo, arg);
    tcg_gen_shri_i64(hi, arg, 32);
}

/* QEMU specific operations.  */

void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx)
{
    uintptr_t val = (uintptr_t)tb + idx;

    if (tb == NULL) {
        tcg_debug_assert(idx == 0);
    } else if (idx <= TB_EXIT_IDXMAX) {
#ifdef CONFIG_DEBUG_TCG
        /* This is an exit following a goto_tb.  Verify that we have
           seen this numbered exit before, via tcg_gen_goto_tb.  */
        tcg_debug_assert(tcg_ctx->goto_tb_issue_mask & (1 << idx));
#endif
        /* When not chaining, exit without indicating a link.  */
        if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
            val = 0;
        }
    } else {
        /* This is an exit via the exitreq label.  */
        tcg_debug_assert(idx == TB_EXIT_REQUESTED);
    }

    tcg_gen_op1i(INDEX_op_exit_tb, val);
}

void tcg_gen_goto_tb(unsigned idx)
{
    /* We only support two chained exits.  */
    tcg_debug_assert(idx <= TB_EXIT_IDXMAX);
#ifdef CONFIG_DEBUG_TCG
    /* Verify that we haven't seen this numbered exit before.  */
    tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0);
    tcg_ctx->goto_tb_issue_mask |= 1 << idx;
#endif
    /* When not chaining, we simply fall through to the "fallback" exit.  */
    if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        tcg_gen_op1i(INDEX_op_goto_tb, idx);
    }
}

void tcg_gen_lookup_and_goto_ptr(void)
{
    if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        TCGv_ptr ptr = tcg_temp_new_ptr();
        gen_helper_lookup_tb_ptr(ptr, cpu_env);
        tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
        tcg_temp_free_ptr(ptr);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
{
    /* Trigger the asserts within as early as possible.  */
    (void)get_alignment_bits(op);

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (!is64) {
            tcg_abort();
        }
        break;
    }
    if (st) {
        op &= ~MO_SIGN;
    }
    return op;
}

static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
                         TCGMemOp memop, TCGArg idx)
{
    TCGMemOpIdx oi = make_memop_idx(memop, idx);
#if TARGET_LONG_BITS == 32
    tcg_gen_op3i_i32(opc, val, addr, oi);
#else
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi);
    } else {
        tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi);
    }
#endif
}

static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
                         TCGMemOp memop, TCGArg idx)
{
    TCGMemOpIdx oi = make_memop_idx(memop, idx);
#if TARGET_LONG_BITS == 32
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
    } else {
        tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi);
    }
#else
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val),
                         TCGV_LOW(addr), TCGV_HIGH(addr), oi);
    } else {
        tcg_gen_op3i_i64(opc, val, addr, oi);
    }
#endif
}

static void tcg_gen_req_mo(TCGBar type)
{
#ifdef TCG_GUEST_DEFAULT_MO
    type &= TCG_GUEST_DEFAULT_MO;
#endif
    type &= ~TCG_TARGET_DEFAULT_MO;
    if (type) {
        tcg_gen_mb(type | TCG_BAR_SC);
    }
}

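/* Barriers are requested as a TCGBar mask: the request is first
 * restricted to what the guest memory model actually requires
 * (TCG_GUEST_DEFAULT_MO, when defined), then the orderings the host
 * provides implicitly (TCG_TARGET_DEFAULT_MO) are dropped, so a
 * memory-barrier op is emitted only when something remains to
 * enforce.  */
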
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    TCGMemOp orig_memop;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    memop = tcg_canonicalize_memop(memop, 0, 0);
    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
                               addr, trace_mem_get_info(memop, 0));

    orig_memop = memop;
    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive requires zero-extended input.  */
        if ((memop & MO_SSIZE) == MO_SW) {
            memop &= ~MO_SIGN;
        }
    }

    gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(val, val);
            if (orig_memop & MO_SIGN) {
                tcg_gen_ext16s_i32(val, val);
            }
            break;
        case MO_32:
            tcg_gen_bswap32_i32(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    TCGv_i32 swap = NULL;

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 0, 1);
    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
                               addr, trace_mem_get_info(memop, 1));

    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
        swap = tcg_temp_new_i32();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_ext16u_i32(swap, val);
            tcg_gen_bswap16_i32(swap, swap);
            break;
        case MO_32:
            tcg_gen_bswap32_i32(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
    }

    gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);

    if (swap) {
        tcg_temp_free_i32(swap);
    }
}

void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    TCGMemOp orig_memop;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    memop = tcg_canonicalize_memop(memop, 1, 0);
    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
                               addr, trace_mem_get_info(memop, 0));

    orig_memop = memop;
    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive requires zero-extended input.  */
        if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
            memop &= ~MO_SIGN;
        }
    }

    gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(val, val);
            if (orig_memop & MO_SIGN) {
                tcg_gen_ext16s_i64(val, val);
            }
            break;
        case MO_32:
            tcg_gen_bswap32_i64(val, val);
            if (orig_memop & MO_SIGN) {
                tcg_gen_ext32s_i64(val, val);
            }
            break;
        case MO_64:
            tcg_gen_bswap64_i64(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    TCGv_i64 swap = NULL;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 1, 1);
    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
                               addr, trace_mem_get_info(memop, 1));

    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
        swap = tcg_temp_new_i64();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_ext16u_i64(swap, val);
            tcg_gen_bswap16_i64(swap, swap);
            break;
        case MO_32:
            tcg_gen_ext32u_i64(swap, val);
            tcg_gen_bswap32_i64(swap, swap);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
    }

    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);

    if (swap) {
        tcg_temp_free_i64(swap);
    }
}

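/* Note on the !TCG_TARGET_HAS_MEMORY_BSWAP paths above: when the
 * backend cannot fold a byte swap into the memory operation itself,
 * loads are performed in host order and swapped (and re-sign-extended)
 * afterwards, while stores swap into a temporary first so that VAL is
 * left unmodified.  */
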
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    default:
        tcg_gen_mov_i32(ret, val);
        break;
    }
}

static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    default:
        tcg_gen_mov_i64(ret, val);
        break;
    }
}

#ifdef CONFIG_SOFTMMU
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
                                  TCGv_i64, TCGv_i32);
#else
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
#endif

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif

static void * const table_cmpxchg[16] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
};

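/* The table is indexed by memop & (MO_SIZE | MO_BSWAP), so each access
 * size has a little- and a big-endian helper; the 64-bit entries exist
 * only when CONFIG_ATOMIC64 is set, courtesy of WITH_ATOMIC64 above.  */
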
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
                                TCGv_i32 newv, TCGArg idx, TCGMemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 0, 0);

    if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();

        tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

        tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
        tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
        tcg_gen_qemu_st_i32(t2, addr, idx, memop);
        tcg_temp_free_i32(t2);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i32(retv, t1, memop);
        } else {
            tcg_gen_mov_i32(retv, t1);
        }
        tcg_temp_free_i32(t1);
    } else {
        gen_atomic_cx_i32 gen;

        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
            gen(retv, cpu_env, addr, cmpv, newv, oi);
            tcg_temp_free_i32(oi);
        }
#else
        gen(retv, cpu_env, addr, cmpv, newv);
#endif

        if (memop & MO_SIGN) {
            tcg_gen_ext_i32(retv, retv, memop);
        }
    }
}

void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
                                TCGv_i64 newv, TCGArg idx, TCGMemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

        tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
        tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
        tcg_gen_qemu_st_i64(t2, addr, idx, memop);
        tcg_temp_free_i64(t2);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, t1, memop);
        } else {
            tcg_gen_mov_i64(retv, t1);
        }
        tcg_temp_free_i64(t1);
    } else if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
        gen_atomic_cx_i64 gen;

        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
            gen(retv, cpu_env, addr, cmpv, newv, oi);
            tcg_temp_free_i32(oi);
        }
#else
        gen(retv, cpu_env, addr, cmpv, newv);
#endif
#else
        gen_helper_exit_atomic(cpu_env);
        /* Produce a result, so that we have a well-formed opcode stream
           with respect to uses of the result in the (dead) code following.  */
        tcg_gen_movi_i64(retv, 0);
#endif /* CONFIG_ATOMIC64 */
    } else {
        TCGv_i32 c32 = tcg_temp_new_i32();
        TCGv_i32 n32 = tcg_temp_new_i32();
        TCGv_i32 r32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                                TCGArg idx, TCGMemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
    gen(t2, t1, val);
    tcg_gen_qemu_st_i32(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                             TCGArg idx, TCGMemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
    {
        TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
        gen(ret, cpu_env, addr, val, oi);
        tcg_temp_free_i32(oi);
    }
#else
    gen(ret, cpu_env, addr, val);
#endif

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
                                TCGArg idx, TCGMemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
    gen(t2, t1, val);
    tcg_gen_qemu_st_i64(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
                             TCGArg idx, TCGMemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
        gen_atomic_op_i64 gen;

        gen = table[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
            gen(ret, cpu_env, addr, val, oi);
            tcg_temp_free_i32(oi);
        }
#else
        gen(ret, cpu_env, addr, val);
#endif
#else
        gen_helper_exit_atomic(cpu_env);
        /* Produce a result, so that we have a well-formed opcode stream
           with respect to uses of the result in the (dead) code following.  */
        tcg_gen_movi_i64(ret, 0);
#endif /* CONFIG_ATOMIC64 */
    } else {
        TCGv_i32 v32 = tcg_temp_new_i32();
        TCGv_i32 r32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}

#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[16] = {                               \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32                                        \
    (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
{                                                                       \
    if (tcg_ctx->tb_cflags & CF_PARALLEL) {                             \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64                                        \
    (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
{                                                                       \
    if (tcg_ctx->tb_cflags & CF_PARALLEL) {                             \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
GEN_ATOMIC_HELPER(umax_fetch, umax, 1)

static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER
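/* Illustrative use of the generated entry points (an editor's sketch,
 * not from the original source): a guest atomic-add that returns the
 * old value could be translated as
 *     tcg_gen_atomic_fetch_add_i32(oldv, addr, incr, mmu_idx, MO_TEUL);
 * where oldv, addr, incr and mmu_idx are hypothetical frontend values.
 * Under CF_PARALLEL this dispatches to the out-of-line atomic helper;
 * otherwise it emits the plain ld/add/st sequence.  */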