/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-mo.h"
#include "trace-tcg.h"
#include "trace/mem.h"

/* Reduce the number of ifdefs below.  This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate.  */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW  TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif

/* Note that this is optimized for sequential allocation during translate.
   Up to and including filling in the forward link immediately.  We'll do
   proper termination of the end of the list after we finish translation.  */

static void tcg_emit_op(TCGContext *ctx, TCGOpcode opc, int args)
{
    int oi = ctx->gen_next_op_idx;
    int ni = oi + 1;
    int pi = oi - 1;

    tcg_debug_assert(oi < OPC_BUF_SIZE);
    ctx->gen_op_buf[0].prev = oi;
    ctx->gen_next_op_idx = ni;

    ctx->gen_op_buf[oi] = (TCGOp){
        .opc = opc,
        .args = args,
        .prev = pi,
        .next = ni
    };
}
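
/*
 * Illustration only (not from the original source): after three ops have
 * been emitted into an empty buffer, the list threaded through the
 * sentinel gen_op_buf[0] looks like
 *
 *   index:   0 (sentinel)   1   2   3
 *   .prev:   3              0   1   2
 *   .next:   1              2   3   4
 *
 * gen_op_buf[0].prev always names the most recently emitted op, and the
 * newest op's .next optimistically points one past the end; that dangling
 * forward link is only terminated once translation finishes.
 */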

void tcg_gen_op1(TCGContext *ctx, TCGOpcode opc, TCGArg a1)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 1 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 1;
    ctx->gen_opparam_buf[pi] = a1;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op2(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 2 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 2;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op3(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
                 TCGArg a2, TCGArg a3)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 3 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 3;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op4(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
                 TCGArg a2, TCGArg a3, TCGArg a4)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 4 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 4;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;
    ctx->gen_opparam_buf[pi + 3] = a4;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op5(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
                 TCGArg a2, TCGArg a3, TCGArg a4, TCGArg a5)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 5 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 5;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;
    ctx->gen_opparam_buf[pi + 3] = a4;
    ctx->gen_opparam_buf[pi + 4] = a5;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op6(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2,
                 TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 6 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 6;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;
    ctx->gen_opparam_buf[pi + 3] = a4;
    ctx->gen_opparam_buf[pi + 4] = a5;
    ctx->gen_opparam_buf[pi + 5] = a6;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_mb(TCGBar mb_type)
{
    if (parallel_cpus) {
        tcg_gen_op1(&tcg_ctx, INDEX_op_mb, mb_type);
    }
}

/* 32 bit ops */

void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_add_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i32) {
        /* Don't recurse with tcg_gen_neg_i32.  */
        tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg2);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg1);
        tcg_gen_sub_i32(ret, t0, arg2);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    TCGv_i32 t0;
    /* Some cases can be optimized here.  */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i32(ret, 0);
        return;
    case 0xffffffffu:
        tcg_gen_mov_i32(ret, arg1);
        return;
    case 0xffu:
        /* Don't recurse with tcg_gen_ext8u_i32.  */
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
            return;
        }
        break;
    case 0xffffu:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
            return;
        }
        break;
    }
    t0 = tcg_const_i32(arg2);
    tcg_gen_and_i32(ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here.  */
    if (arg2 == -1) {
        tcg_gen_movi_i32(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here.  */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) {
        /* Don't recurse with tcg_gen_not_i32.  */
        tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_xor_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_shl_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_shr_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_sar_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l));
    }
}

void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_brcond_i32(cond, arg1, t0, l);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
                         TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i32(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i32(ret, 0);
    } else {
        tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond);
    }
}

void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
                          TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0 = tcg_const_i32(arg2);
    tcg_gen_setcond_i32(cond, ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0 = tcg_const_i32(arg2);
    tcg_gen_mul_i32(ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_div_i32(ret, arg1, arg2);
    }
}

void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_rem_i32(ret, arg1, arg2);
    }
}
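
/*
 * The div-based fallback above uses the identity rem(a, b) = a - (a / b) * b
 * with truncating division.  Worked example (illustration only, not from
 * the original source): a = -7, b = 3 gives -7 / 3 == -2, so
 * rem = -7 - (-2 * 3) = -1, matching the sign-of-dividend convention of
 * INDEX_op_rem_i32.
 */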

void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_divu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_remu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_andc_i32) {
        tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_and_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_eqv_i32) {
        tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
    } else {
        tcg_gen_xor_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nand_i32) {
        tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
    } else {
        tcg_gen_and_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nor_i32) {
        tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
    } else {
        tcg_gen_or_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_orc_i32) {
        tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_clz_i32) {
        tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_clz_i64) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t1, arg1);
        tcg_gen_extu_i32_i64(t2, arg2);
        tcg_gen_addi_i64(t2, t2, 32);
        tcg_gen_clz_i64(t1, t1, t2);
        tcg_gen_extrl_i64_i32(ret, t1);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
        tcg_gen_subi_i32(ret, ret, 32);
    } else {
        gen_helper_clz_i32(ret, arg1, arg2);
    }
}
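
/*
 * Why the +32/-32 bias in the clz_i64 path above (illustration only, not
 * from the original source): a nonzero 32-bit value zero-extended to 64
 * bits gains exactly 32 leading zeros, so clz64(zext(arg1)) is
 * clz32(arg1) + 32 and subtracting 32 recovers the 32-bit count.  Biasing
 * the default value arg2 by +32 keeps the "input was zero" case
 * consistent through the same subtraction.
 */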

void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    TCGv_i32 t = tcg_const_i32(arg2);
    tcg_gen_clz_i32(ret, arg1, t);
    tcg_temp_free_i32(t);
}

void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_ctz_i32) {
        tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_ctz_i64) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t1, arg1);
        tcg_gen_extu_i32_i64(t2, arg2);
        tcg_gen_ctz_i64(t1, t1, t2);
        tcg_gen_extrl_i64_i32(ret, t1);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
    } else if (TCG_TARGET_HAS_ctpop_i32
               || TCG_TARGET_HAS_ctpop_i64
               || TCG_TARGET_HAS_clz_i32
               || TCG_TARGET_HAS_clz_i64) {
        TCGv_i32 z, t = tcg_temp_new_i32();

        if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
            tcg_gen_subi_i32(t, arg1, 1);
            tcg_gen_andc_i32(t, t, arg1);
            tcg_gen_ctpop_i32(t, t);
        } else {
            /* Since all non-x86 hosts have clz(0) == 32, don't fight it.  */
            tcg_gen_neg_i32(t, arg1);
            tcg_gen_and_i32(t, t, arg1);
            tcg_gen_clzi_i32(t, t, 32);
            tcg_gen_xori_i32(t, t, 31);
        }
        z = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
        tcg_temp_free_i32(t);
        tcg_temp_free_i32(z);
    } else {
        gen_helper_ctz_i32(ret, arg1, arg2);
    }
}
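
/*
 * The ctpop-based fallback above uses the identity
 * ctz(x) = ctpop((x - 1) & ~x): subtracting 1 turns the trailing zeros
 * into ones and clears the lowest set bit, and the andc discards all
 * higher bits.  Worked example (illustration only): x = 0b01000 yields
 * (x - 1) & ~x = 0b00111, whose population count is 3 == ctz(x).  The
 * clz-based fallback instead isolates the lowest set bit with x & -x and
 * converts clz to ctz via the xor with 31.
 */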

void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
        /* This equivalence has the advantage of not requiring a fixup.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_subi_i32(t, arg1, 1);
        tcg_gen_andc_i32(t, t, arg1);
        tcg_gen_ctpop_i32(ret, t);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i32 t = tcg_const_i32(arg2);
        tcg_gen_ctz_i32(ret, arg1, t);
        tcg_temp_free_i32(t);
    }
}

void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_clz_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_sari_i32(t, arg, 31);
        tcg_gen_xor_i32(t, t, arg);
        tcg_gen_clzi_i32(t, t, 32);
        tcg_gen_subi_i32(ret, t, 1);
        tcg_temp_free_i32(t);
    } else {
        gen_helper_clrsb_i32(ret, arg);
    }
}
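
/*
 * clrsb counts leading redundant sign bits.  Sketch of the reduction above
 * (illustration only): xoring arg with its sign mask (arg >> 31) turns
 * every copy of the sign bit into 0, so clz of the result counts the sign
 * bit plus its redundant copies, and subtracting 1 excludes the sign bit
 * itself.  E.g. arg = 0xffffff00: mask = 0xffffffff, xor = 0x000000ff,
 * clz = 24, clrsb = 23.
 */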

void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
{
    if (TCG_TARGET_HAS_ctpop_i32) {
        tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
    } else if (TCG_TARGET_HAS_ctpop_i64) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, arg1);
        tcg_gen_ctpop_i64(t, t);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);
    } else {
        gen_helper_ctpop_i32(ret, arg1);
    }
}

void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shl_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shr_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}
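
/*
 * The shift-based rotate above implements the standard identity
 * rotl(x, n) = (x << n) | (x >> (32 - n)).  Worked example (illustration
 * only): x = 0x80000001, n = 4 gives (x << 4) = 0x00000010 and
 * (x >> 28) = 0x00000008, so the result is 0x00000018.
 */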

void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (TCG_TARGET_HAS_rot_i32) {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_rotl_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shli_i32(t0, arg1, arg2);
        tcg_gen_shri_i32(t1, arg1, 32 - arg2);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shr_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shl_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
    }
}

void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
                         unsigned int ofs, unsigned int len)
{
    uint32_t mask;
    TCGv_i32 t1;

    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (len == 32) {
        tcg_gen_mov_i32(ret, arg2);
        return;
    }
    if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
        return;
    }

    mask = (1u << len) - 1;
    t1 = tcg_temp_new_i32();

    if (ofs + len < 32) {
        tcg_gen_andi_i32(t1, arg2, mask);
        tcg_gen_shli_i32(t1, t1, ofs);
    } else {
        tcg_gen_shli_i32(t1, arg2, ofs);
    }
    tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
    tcg_gen_or_i32(ret, ret, t1);

    tcg_temp_free_i32(t1);
}
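
/*
 * Worked example of the mask path above (illustration only): with ofs = 8
 * and len = 8, mask = 0xff, so
 *   t1  = (arg2 & 0xff) << 8
 *   ret = (arg1 & ~0x0000ff00) | t1
 * i.e. byte 1 of arg1 is replaced by the low byte of arg2.
 */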

void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
                           unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (ofs + len == 32) {
        tcg_gen_shli_i32(ret, arg, ofs);
    } else if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
    } else if (TCG_TARGET_HAS_deposit_i32
               && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        TCGv_i32 zero = tcg_const_i32(0);
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
        tcg_temp_free_i32(zero);
    } else {
        /* To help two-operand hosts we prefer to zero-extend first,
           which allows ARG to stay live.  */
        switch (len) {
        case 16:
            if (TCG_TARGET_HAS_ext16u_i32) {
                tcg_gen_ext16u_i32(ret, arg);
                tcg_gen_shli_i32(ret, ret, ofs);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i32) {
                tcg_gen_ext8u_i32(ret, arg);
                tcg_gen_shli_i32(ret, ret, ofs);
                return;
            }
            break;
        }
        /* Otherwise prefer zero-extension over AND for code size.  */
        switch (ofs + len) {
        case 16:
            if (TCG_TARGET_HAS_ext16u_i32) {
                tcg_gen_shli_i32(ret, arg, ofs);
                tcg_gen_ext16u_i32(ret, ret);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i32) {
                tcg_gen_shli_i32(ret, arg, ofs);
                tcg_gen_ext8u_i32(ret, ret);
                return;
            }
            break;
        }
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        tcg_gen_shli_i32(ret, ret, ofs);
    }
}

void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
                         unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 32) {
        tcg_gen_shri_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        return;
    }

    if (TCG_TARGET_HAS_extract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that zero-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_ext16u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_ext8u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    }

    /* ??? Ideally we'd know what values are available for immediate AND.
       Assume that 8 bits are available, plus the special case of 16,
       so that we get ext8u, ext16u.  */
    switch (len) {
    case 1 ... 8: case 16:
        tcg_gen_shri_i32(ret, arg, ofs);
        tcg_gen_andi_i32(ret, ret, (1u << len) - 1);
        break;
    default:
        tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
        tcg_gen_shri_i32(ret, ret, 32 - len);
        break;
    }
}
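
/*
 * Example of the generic shift-pair fallback above (illustration only):
 * extracting ofs = 4, len = 8 first shifts left by 32 - 8 - 4 = 20 to
 * bring the field's top bit to bit 31, then shifts right by 32 - 8 = 24,
 * leaving the zero-extended field in bits [7:0].
 */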

void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
                          unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 32) {
        tcg_gen_sari_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        switch (len) {
        case 16:
            tcg_gen_ext16s_i32(ret, arg);
            return;
        case 8:
            tcg_gen_ext8s_i32(ret, arg);
            return;
        }
    }

    if (TCG_TARGET_HAS_sextract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that sign-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_ext16s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_ext8s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    }
    switch (len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext16s_i32(ret, ret);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext8s_i32(ret, ret);
            return;
        }
        break;
    }

    tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
    tcg_gen_sari_i32(ret, ret, 32 - len);
}

void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
                         TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i32(ret, v1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_mov_i32(ret, v2);
    } else if (TCG_TARGET_HAS_movcond_i32) {
        tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cond, t0, c1, c2);
        tcg_gen_neg_i32(t0, t0);
        tcg_gen_and_i32(t1, v1, t0);
        tcg_gen_andc_i32(ret, v2, t0);
        tcg_gen_or_i32(ret, ret, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}
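
/*
 * The fallback above selects without branching (illustration only):
 * setcond produces 0 or 1, and negation turns that into an all-zeros or
 * all-ones mask m, so
 *   ret = (v1 & m) | (v2 & ~m)
 * picks v1 when the condition held and v2 otherwise.
 */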

void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_add_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_sub2_i32) {
        tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_sub_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i32) {
        tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_muluh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_muls2_i32) {
        tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_mulsh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative inputs.  */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_sari_i32(t3, arg2, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_and_i32(t3, t3, arg1);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_sub_i32(rh, rh, t3);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_ext_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
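
/*
 * The "adjust for negative inputs" step above follows from the identity
 * (illustration only): interpreting a 32-bit value a as signed means
 * a_s = a_u - 2^32 * sign(a), so
 *   a_s * b_s = a_u * b_u - 2^32 * (sign(a) * b_u + sign(b) * a_u) (mod 2^64)
 * The low 32 bits are unchanged; the high word is corrected by
 * subtracting b when a < 0 and a when b < 0, which is exactly what the
 * sari/and/sub sequence computes.
 */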

void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative input for the signed arg1.  */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 24);
        tcg_gen_sari_i32(ret, ret, 24);
    }
}

void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 16);
        tcg_gen_sari_i32(ret, ret, 16);
    }
}

void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffffu);
    }
}

/* Note: we assume the two high bytes are set to zero */
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap16_i32) {
        tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();

        tcg_gen_ext8u_i32(t0, arg);
        tcg_gen_shli_i32(t0, t0, 8);
        tcg_gen_shri_i32(ret, arg, 8);
        tcg_gen_or_i32(ret, ret, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap32_i32) {
        tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg);
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();

        tcg_gen_shli_i32(t0, arg, 24);

        tcg_gen_andi_i32(t1, arg, 0x0000ff00);
        tcg_gen_shli_i32(t1, t1, 8);
        tcg_gen_or_i32(t0, t0, t1);

        tcg_gen_shri_i32(t1, arg, 8);
        tcg_gen_andi_i32(t1, t1, 0x0000ff00);
        tcg_gen_or_i32(t0, t0, t1);

        tcg_gen_shri_i32(t1, arg, 24);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}
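
/*
 * Byte trace of the open-coded bswap32 above (illustration only), for
 * arg = 0xAABBCCDD:
 *   arg << 24               = 0xDD000000
 *   (arg & 0x0000ff00) << 8 = 0x00CC0000
 *   (arg >> 8) & 0x0000ff00 = 0x0000BB00
 *   arg >> 24               = 0x000000AA
 * OR-ing the four terms gives 0xDDCCBBAA.
 */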

/* 64 bit ops */

#if TCG_TARGET_REG_BITS == 32
/* These are all inline for TCG_TARGET_REG_BITS == 64.  */

void tcg_gen_discard_i64(TCGv_i64 arg)
{
    tcg_gen_discard_i32(TCGV_LOW(arg));
    tcg_gen_discard_i32(TCGV_HIGH(arg));
}

void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
    tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
}

void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
{
    tcg_gen_movi_i32(TCGV_LOW(ret), arg);
    tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32);
}

void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    /* Since arg2 and ret have different types,
       they cannot be the same temporary */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
#else
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4);
#endif
}

void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
#else
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4);
#endif
}

void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shl_i64(ret, arg1, arg2);
}

void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shr_i64(ret, arg1, arg2);
}

void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_sar_i64(ret, arg1, arg2);
}

void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0;
    TCGv_i32 t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i32();

    tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0),
                      TCGV_LOW(arg1), TCGV_LOW(arg2));

    tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
    tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), TCGV_LOW(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);

    tcg_gen_mov_i64(ret, t0);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i32(t1);
}
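
/*
 * The 32-bit mul_i64 above is schoolbook multiplication mod 2^64
 * (illustration only): writing each operand as 2^32 * h + l,
 *   (2^32 a_h + a_l)(2^32 b_h + b_l)
 *     = 2^64 a_h b_h + 2^32 (a_h b_l + a_l b_h) + a_l b_l
 * The 2^64 term vanishes mod 2^64, so only the full 64-bit product
 * a_l * b_l (via mulu2) plus the two cross products folded into the
 * high word are needed.
 */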
#endif /* TCG_TARGET_REG_BITS == 32 */

void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_add_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) {
        /* Don't recurse with tcg_gen_neg_i64.  */
        tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg2);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg1);
        tcg_gen_sub_i64(ret, t0, arg2);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
    TCGv_i64 t0;

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }

    /* Some cases can be optimized here.  */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i64(ret, 0);
        return;
    case 0xffffffffffffffffull:
        tcg_gen_mov_i64(ret, arg1);
        return;
    case 0xffull:
        /* Don't recurse with tcg_gen_ext8u_i64.  */
        if (TCG_TARGET_HAS_ext8u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
            return;
        }
        break;
    case 0xffffu:
        if (TCG_TARGET_HAS_ext16u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
            return;
        }
        break;
    case 0xffffffffull:
        if (TCG_TARGET_HAS_ext32u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
            return;
        }
        break;
    }
    t0 = tcg_const_i64(arg2);
    tcg_gen_and_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_ori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }
    /* Some cases can be optimized here.  */
    if (arg2 == -1) {
        tcg_gen_movi_i64(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_or_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_xori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_xori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }
    /* Some cases can be optimized here.  */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) {
        /* Don't recurse with tcg_gen_not_i64.  */
        tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_xor_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
                                      unsigned c, bool right, bool arith)
{
    tcg_debug_assert(c < 64);
    if (c == 0) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
        tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
    } else if (c >= 32) {
        c -= 32;
        if (right) {
            if (arith) {
                tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
            } else {
                tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            }
        } else {
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
            tcg_gen_movi_i32(TCGV_LOW(ret), 0);
        }
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        if (right) {
            tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
            if (arith) {
                tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
            } else {
                tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
            }
            tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
            tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
            tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
        } else {
            tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
            /* Note: ret can be the same as arg1, so we use t1 */
            tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
            tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
            tcg_gen_mov_i32(TCGV_LOW(ret), t1);
        }
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}
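
/*
 * Sketch of the 0 < c < 32 case above (illustration only), for a logical
 * right shift of the 64-bit pair (hi, lo) by c:
 *   new_lo = (lo >> c) | (hi << (32 - c))
 *   new_hi =  hi >> c        (or an arithmetic shift when arith is set)
 * The bits shifted out of the high word are shifted into the top of the
 * low word, so one double-word shift costs three 32-bit shifts and an OR.
 */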

void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shl_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shr_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_sar_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
                              TCGV_HIGH(arg1), TCGV_LOW(arg2),
                              TCGV_HIGH(arg2), cond, label_arg(l));
        } else {
            tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond,
                              label_arg(l));
        }
    }
}

void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_brcond_i64(cond, arg1, t0, l);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
                         TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i64(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
                             TCGV_LOW(arg1), TCGV_HIGH(arg1),
                             TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        } else {
            tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond);
        }
    }
}

void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
                          TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_setcond_i64(cond, ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_mul_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_div_i64) {
        tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
        tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_div_i64(ret, arg1, arg2);
    }
}

void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
        tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_rem_i64(ret, arg1, arg2);
    }
}

void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_div_i64) {
        tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_movi_i64(t0, 0);
        tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_divu_i64(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_movi_i64(t0, 0);
        tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_remu_i64(ret, arg1, arg2);
    }
}

void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext8s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 56);
        tcg_gen_sari_i64(ret, ret, 56);
    }
}

void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext16s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 48);
        tcg_gen_sari_i64(ret, ret, 48);
    }
}

void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext32s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 32);
        tcg_gen_sari_i64(ret, ret, 32);
    }
}

void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext8u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext16u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffffu);
    }
}

void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext32u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffffffffu);
    }
}

/* Note: we assume the six high bytes are set to zero */
void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_bswap16_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap16_i64, ret, arg);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();

        tcg_gen_ext8u_i64(t0, arg);
        tcg_gen_shli_i64(t0, t0, 8);
        tcg_gen_shri_i64(ret, arg, 8);
        tcg_gen_or_i64(ret, ret, t0);
        tcg_temp_free_i64(t0);
    }
}

/* Note: we assume the four high bytes are set to zero */
void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_bswap32_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();

        tcg_gen_shli_i64(t0, arg, 24);
        tcg_gen_ext32u_i64(t0, t0);

        tcg_gen_andi_i64(t1, arg, 0x0000ff00);
        tcg_gen_shli_i64(t1, t1, 8);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 8);
        tcg_gen_andi_i64(t1, t1, 0x0000ff00);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 24);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();

        tcg_gen_bswap32_i32(t0, TCGV_LOW(arg));
        tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg));
        tcg_gen_mov_i32(TCGV_LOW(ret), t1);
        tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else if (TCG_TARGET_HAS_bswap64_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_shli_i64(t0, arg, 56);

        tcg_gen_andi_i64(t1, arg, 0x0000ff00);
        tcg_gen_shli_i64(t1, t1, 40);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_andi_i64(t1, arg, 0x00ff0000);
        tcg_gen_shli_i64(t1, t1, 24);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_andi_i64(t1, arg, 0xff000000);
        tcg_gen_shli_i64(t1, t1, 8);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 8);
        tcg_gen_andi_i64(t1, t1, 0xff000000);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 24);
        tcg_gen_andi_i64(t1, t1, 0x00ff0000);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 40);
        tcg_gen_andi_i64(t1, t1, 0x0000ff00);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 56);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
    } else if (TCG_TARGET_HAS_not_i64) {
        tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
    } else {
        tcg_gen_xori_i64(ret, arg, -1);
    }
}

void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_andc_i64) {
        tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_not_i64(t0, arg2);
        tcg_gen_and_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_eqv_i64) {
        tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2);
    } else {
        tcg_gen_xor_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_nand_i64) {
        tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
    } else {
        tcg_gen_and_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_nor_i64) {
        tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2);
    } else {
        tcg_gen_or_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_orc_i64) {
        tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_not_i64(t0, arg2);
        tcg_gen_or_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_clz_i64) {
        tcg_gen_op3_i64(INDEX_op_clz_i64, ret, arg1, arg2);
    } else {
        gen_helper_clz_i64(ret, arg1, arg2);
    }
}

void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32
        && TCG_TARGET_HAS_clz_i32
        && arg2 <= 0xffffffffu) {
        TCGv_i32 t = tcg_const_i32((uint32_t)arg2 - 32);
        tcg_gen_clz_i32(t, TCGV_LOW(arg1), t);
        tcg_gen_addi_i32(t, t, 32);
        tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t);
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i64 t = tcg_const_i64(arg2);
        tcg_gen_clz_i64(ret, arg1, t);
        tcg_temp_free_i64(t);
    }
}
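
/*
 * How the 32-bit clzi_i64 path above composes (illustration only):
 * clz32(x, def) returns def when x == 0, so
 *   t = clz32(lo, arg2 - 32) + 32
 * is the leading-zero count of the low word as seen from bit 63, falling
 * back to arg2 when lo == 0 (the -32/+32 cancel), and
 *   result = clz32(hi, t)
 * yields clz of the high word when hi != 0, otherwise the value already
 * computed for the low word.
 */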

void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_ctz_i64) {
        tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) {
        TCGv_i64 z, t = tcg_temp_new_i64();

        if (TCG_TARGET_HAS_ctpop_i64) {
            tcg_gen_subi_i64(t, arg1, 1);
            tcg_gen_andc_i64(t, t, arg1);
            tcg_gen_ctpop_i64(t, t);
        } else {
            /* Since all non-x86 hosts have clz(0) == 64, don't fight it.  */
            tcg_gen_neg_i64(t, arg1);
            tcg_gen_and_i64(t, t, arg1);
            tcg_gen_clzi_i64(t, t, 64);
            tcg_gen_xori_i64(t, t, 63);
        }
        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    } else {
        gen_helper_ctz_i64(ret, arg1, arg2);
    }
}

void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32
        && TCG_TARGET_HAS_ctz_i32
        && arg2 <= 0xffffffffu) {
        TCGv_i32 t32 = tcg_const_i32((uint32_t)arg2 - 32);
        tcg_gen_ctz_i32(t32, TCGV_HIGH(arg1), t32);
        tcg_gen_addi_i32(t32, t32, 32);
        tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32);
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        tcg_temp_free_i32(t32);
    } else if (!TCG_TARGET_HAS_ctz_i64
               && TCG_TARGET_HAS_ctpop_i64
               && arg2 == 64) {
        /* This equivalence has the advantage of not requiring a fixup.  */
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_subi_i64(t, arg1, 1);
        tcg_gen_andc_i64(t, t, arg1);
        tcg_gen_ctpop_i64(ret, t);
        tcg_temp_free_i64(t);
    } else {
        TCGv_i64 t64 = tcg_const_i64(arg2);
        tcg_gen_ctz_i64(ret, arg1, t64);
        tcg_temp_free_i64(t64);
    }
}

void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_sari_i64(t, arg, 63);
        tcg_gen_xor_i64(t, t, arg);
        tcg_gen_clzi_i64(t, t, 64);
        tcg_gen_subi_i64(ret, t, 1);
        tcg_temp_free_i64(t);
    } else {
        gen_helper_clrsb_i64(ret, arg);
    }
}

void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1)
{
    if (TCG_TARGET_HAS_ctpop_i64) {
        tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1);
    } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32) {
        tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
        tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
        tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else {
        gen_helper_ctpop_i64(ret, arg1);
    }
}

void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rot_i64) {
        tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();
        tcg_gen_shl_i64(t0, arg1, arg2);
        tcg_gen_subfi_i64(t1, 64, arg2);
        tcg_gen_shr_i64(t1, arg1, t1);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else if (TCG_TARGET_HAS_rot_i64) {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_rotl_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();
        tcg_gen_shli_i64(t0, arg1, arg2);
        tcg_gen_shri_i64(t1, arg1, 64 - arg2);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rot_i64) {
        tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();
        tcg_gen_shr_i64(t0, arg1, arg2);
        tcg_gen_subfi_i64(t1, 64, arg2);
        tcg_gen_shl_i64(t1, arg1, t1);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        tcg_gen_rotli_i64(ret, arg1, 64 - arg2);
    }
}

void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
                         unsigned int ofs, unsigned int len)
{
    uint64_t mask;
    TCGv_i64 t1;

    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    if (len == 64) {
        tcg_gen_mov_i64(ret, arg2);
        return;
    }
    if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) {
        tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        if (ofs >= 32) {
            tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1),
                                TCGV_LOW(arg2), ofs - 32, len);
            tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
            return;
        }
        if (ofs + len <= 32) {
            tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(arg1),
                                TCGV_LOW(arg2), ofs, len);
            tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
            return;
        }
    }

    mask = (1ull << len) - 1;
    t1 = tcg_temp_new_i64();

    if (ofs + len < 64) {
        tcg_gen_andi_i64(t1, arg2, mask);
        tcg_gen_shli_i64(t1, t1, ofs);
    } else {
        tcg_gen_shli_i64(t1, arg2, ofs);
    }
    tcg_gen_andi_i64(ret, arg1, ~(mask << ofs));
    tcg_gen_or_i64(ret, ret, t1);

    tcg_temp_free_i64(t1);
}

void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
                           unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    if (ofs + len == 64) {
        tcg_gen_shli_i64(ret, arg, ofs);
    } else if (ofs == 0) {
        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
    } else if (TCG_TARGET_HAS_deposit_i64
               && TCG_TARGET_deposit_i64_valid(ofs, len)) {
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len);
        tcg_temp_free_i64(zero);
    } else {
        if (TCG_TARGET_REG_BITS == 32) {
            if (ofs >= 32) {
                tcg_gen_deposit_z_i32(TCGV_HIGH(ret), TCGV_LOW(arg),
                                      ofs - 32, len);
                tcg_gen_movi_i32(TCGV_LOW(ret), 0);
                return;
            }
            if (ofs + len <= 32) {
                tcg_gen_deposit_z_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
                tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
                return;
            }
        }
        /* To help two-operand hosts we prefer to zero-extend first,
           which allows ARG to stay live.  */
        switch (len) {
        case 32:
            if (TCG_TARGET_HAS_ext32u_i64) {
                tcg_gen_ext32u_i64(ret, arg);
                tcg_gen_shli_i64(ret, ret, ofs);
                return;
            }
            break;
        case 16:
            if (TCG_TARGET_HAS_ext16u_i64) {
                tcg_gen_ext16u_i64(ret, arg);
                tcg_gen_shli_i64(ret, ret, ofs);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i64) {
                tcg_gen_ext8u_i64(ret, arg);
                tcg_gen_shli_i64(ret, ret, ofs);
                return;
            }
            break;
        }
        /* Otherwise prefer zero-extension over AND for code size.  */
        switch (ofs + len) {
        case 32:
            if (TCG_TARGET_HAS_ext32u_i64) {
                tcg_gen_shli_i64(ret, arg, ofs);
                tcg_gen_ext32u_i64(ret, ret);
                return;
            }
            break;
        case 16:
            if (TCG_TARGET_HAS_ext16u_i64) {
                tcg_gen_shli_i64(ret, arg, ofs);
                tcg_gen_ext16u_i64(ret, ret);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i64) {
                tcg_gen_shli_i64(ret, arg, ofs);
                tcg_gen_ext8u_i64(ret, ret);
                return;
            }
            break;
        }
        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
        tcg_gen_shli_i64(ret, ret, ofs);
    }
}
void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
                         unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 64) {
        tcg_gen_shri_i64(ret, arg, 64 - len);
        return;
    }
    if (ofs == 0) {
        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        /* Look for a 32-bit extract within one of the two words.  */
        if (ofs >= 32) {
            tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            return;
        }
        if (ofs + len <= 32) {
            tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            return;
        }
        /* The field is split across two words.  One double-word
           shift is better than two double-word shifts.  */
        goto do_shift_and;
    }

    if (TCG_TARGET_HAS_extract_i64
        && TCG_TARGET_extract_i64_valid(ofs, len)) {
        tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
        return;
    }

    /* Assume that zero-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 32:
        if (TCG_TARGET_HAS_ext32u_i64) {
            tcg_gen_ext32u_i64(ret, arg);
            tcg_gen_shri_i64(ret, ret, ofs);
            return;
        }
        break;
    case 16:
        if (TCG_TARGET_HAS_ext16u_i64) {
            tcg_gen_ext16u_i64(ret, arg);
            tcg_gen_shri_i64(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8u_i64) {
            tcg_gen_ext8u_i64(ret, arg);
            tcg_gen_shri_i64(ret, ret, ofs);
            return;
        }
        break;
    }

    /* ??? Ideally we'd know what values are available for immediate AND.
       Assume that 8 bits are available, plus the special cases of 16 and 32,
       so that we get ext8u, ext16u, and ext32u.  */
    switch (len) {
    case 1 ... 8: case 16: case 32:
    do_shift_and:
        tcg_gen_shri_i64(ret, arg, ofs);
        tcg_gen_andi_i64(ret, ret, (1ull << len) - 1);
        break;
    default:
        tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
        tcg_gen_shri_i64(ret, ret, 64 - len);
        break;
    }
}
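
/*
 * The final shift-pair fallback isolates the field without any AND:
 * e.g. extract with ofs = 5, len = 9 becomes (x << 50) >> 55 using a
 * logical right shift, first discarding the bits above the field and
 * then aligning it at bit 0.
 */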
void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
                          unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    /* Canonicalize certain special cases, even if sextract is supported.  */
    if (ofs + len == 64) {
        tcg_gen_sari_i64(ret, arg, 64 - len);
        return;
    }
    if (ofs == 0) {
        switch (len) {
        case 32:
            tcg_gen_ext32s_i64(ret, arg);
            return;
        case 16:
            tcg_gen_ext16s_i64(ret, arg);
            return;
        case 8:
            tcg_gen_ext8s_i64(ret, arg);
            return;
        }
    }

    if (TCG_TARGET_REG_BITS == 32) {
        /* Look for a 32-bit extract within one of the two words.  */
        if (ofs >= 32) {
            tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
        } else if (ofs + len <= 32) {
            tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
        } else if (ofs == 0) {
            tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
            tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32);
            return;
        } else if (len > 32) {
            TCGv_i32 t = tcg_temp_new_i32();
            /* Extract the bits for the high word normally.  */
            tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs + 32, len - 32);
            /* Shift the field down for the low part.  */
            tcg_gen_shri_i64(ret, arg, ofs);
            /* Overwrite the shift into the high part.  */
            tcg_gen_mov_i32(TCGV_HIGH(ret), t);
            tcg_temp_free_i32(t);
            return;
        } else {
            /* Shift the field down for the low part, such that the
               field sits at the MSB.  */
            tcg_gen_shri_i64(ret, arg, ofs + len - 32);
            /* Shift the field down from the MSB, sign extending.  */
            tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_LOW(ret), 32 - len);
        }
        /* Sign-extend the field from 32 bits.  */
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
        return;
    }

    if (TCG_TARGET_HAS_sextract_i64
        && TCG_TARGET_extract_i64_valid(ofs, len)) {
        tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len);
        return;
    }

    /* Assume that sign-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 32:
        if (TCG_TARGET_HAS_ext32s_i64) {
            tcg_gen_ext32s_i64(ret, arg);
            tcg_gen_sari_i64(ret, ret, ofs);
            return;
        }
        break;
    case 16:
        if (TCG_TARGET_HAS_ext16s_i64) {
            tcg_gen_ext16s_i64(ret, arg);
            tcg_gen_sari_i64(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i64) {
            tcg_gen_ext8s_i64(ret, arg);
            tcg_gen_sari_i64(ret, ret, ofs);
            return;
        }
        break;
    }
    switch (len) {
    case 32:
        if (TCG_TARGET_HAS_ext32s_i64) {
            tcg_gen_shri_i64(ret, arg, ofs);
            tcg_gen_ext32s_i64(ret, ret);
            return;
        }
        break;
    case 16:
        if (TCG_TARGET_HAS_ext16s_i64) {
            tcg_gen_shri_i64(ret, arg, ofs);
            tcg_gen_ext16s_i64(ret, ret);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i64) {
            tcg_gen_shri_i64(ret, arg, ofs);
            tcg_gen_ext8s_i64(ret, ret);
            return;
        }
        break;
    }
    tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
    tcg_gen_sari_i64(ret, ret, 64 - len);
}
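
/*
 * The closing shift pair is the signed analogue of the extract
 * fallback: sextract with ofs = 5, len = 9 becomes (x << 50) >> 55
 * with an arithmetic right shift, so the field lands in bits [8:0]
 * and its top bit is replicated through bits [63:9].
 */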
void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
                         TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i64(ret, v1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_mov_i64(ret, v2);
    } else if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0,
                         TCGV_LOW(c1), TCGV_HIGH(c1),
                         TCGV_LOW(c2), TCGV_HIGH(c2), cond);

        if (TCG_TARGET_HAS_movcond_i32) {
            tcg_gen_movi_i32(t1, 0);
            tcg_gen_movcond_i32(TCG_COND_NE, TCGV_LOW(ret), t0, t1,
                                TCGV_LOW(v1), TCGV_LOW(v2));
            tcg_gen_movcond_i32(TCG_COND_NE, TCGV_HIGH(ret), t0, t1,
                                TCGV_HIGH(v1), TCGV_HIGH(v2));
        } else {
            tcg_gen_neg_i32(t0, t0);

            tcg_gen_and_i32(t1, TCGV_LOW(v1), t0);
            tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(v2), t0);
            tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t1);

            tcg_gen_and_i32(t1, TCGV_HIGH(v1), t0);
            tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(v2), t0);
            tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t1);
        }
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else if (TCG_TARGET_HAS_movcond_i64) {
        tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond, t0, c1, c2);
        tcg_gen_neg_i64(t0, t0);
        tcg_gen_and_i64(t1, v1, t0);
        tcg_gen_andc_i64(ret, v2, t0);
        tcg_gen_or_i64(ret, ret, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
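
/*
 * The generic fallback uses the branchless mask-select identity: with
 * t0 = -(c1 cond c2), i.e. all-ones when the condition holds and zero
 * otherwise, (v1 & t0) | (v2 & ~t0) yields v1 or v2 as required.
 */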
void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
                      TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    if (TCG_TARGET_HAS_add2_i64) {
        tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_add_i64(t0, al, bl);
        tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al);
        tcg_gen_add_i64(rh, ah, bh);
        tcg_gen_add_i64(rh, rh, t1);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
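
/*
 * The carry out of the low half is recovered with an unsigned compare:
 * t0 = al + bl wraps modulo 2^64, so t0 <u al exactly when the
 * addition overflowed.  E.g. 0xffffffffffffffff + 1 wraps to 0, which
 * is below the addend, giving a carry of 1 into the high half.
 */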
void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
                      TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    if (TCG_TARGET_HAS_sub2_i64) {
        tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_sub_i64(t0, al, bl);
        tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl);
        tcg_gen_sub_i64(rh, ah, bh);
        tcg_gen_sub_i64(rh, rh, t1);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
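
/*
 * Likewise the borrow out of the low half is al <u bl: the low
 * difference wraps exactly when the subtrahend is the larger value,
 * and that borrow is then subtracted from the high half.
 */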
void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i64) {
        tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_muluh_i64) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
        tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t);
        tcg_temp_free_i64(t);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mul_i64(t0, arg1, arg2);
        gen_helper_muluh_i64(rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
    }
}
void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_muls2_i64) {
        tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_mulsh_i64) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
        tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t);
        tcg_temp_free_i64(t);
    } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
        /* Adjust for negative inputs.  */
        tcg_gen_sari_i64(t2, arg1, 63);
        tcg_gen_sari_i64(t3, arg2, 63);
        tcg_gen_and_i64(t2, t2, arg2);
        tcg_gen_and_i64(t3, t3, arg1);
        tcg_gen_sub_i64(rh, t1, t2);
        tcg_gen_sub_i64(rh, rh, t3);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mul_i64(t0, arg1, arg2);
        gen_helper_mulsh_i64(rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
    }
}
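
/*
 * The mulu2-based fallback relies on the standard signed/unsigned
 * correction: interpreting a and b as signed,
 *     signed_high(a, b) = unsigned_high(a, b)
 *                         - (a < 0 ? b : 0) - (b < 0 ? a : 0)
 * and the sari/and pairs above compute exactly those two conditional
 * subtrahends (an arithmetic shift by 63 yields the 0 or -1 mask).
 */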
void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
    /* Adjust for negative input for the signed arg1.  */
    tcg_gen_sari_i64(t2, arg1, 63);
    tcg_gen_and_i64(t2, t2, arg2);
    tcg_gen_sub_i64(rh, t1, t2);
    tcg_gen_mov_i64(rl, t0);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
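
/*
 * With only arg1 signed, a single correction term remains:
 *     signed_unsigned_high(a, b) = unsigned_high(a, b) - (a < 0 ? b : 0)
 * which is what the sari/and/sub sequence above computes.
 */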
/* Size changing operations.  */

void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(ret, TCGV_LOW(arg));
    } else if (TCG_TARGET_HAS_extrl_i64_i32) {
        tcg_gen_op2(&tcg_ctx, INDEX_op_extrl_i64_i32,
                    GET_TCGV_I32(ret), GET_TCGV_I64(arg));
    } else {
        tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
    }
}
void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
    } else if (TCG_TARGET_HAS_extrh_i64_i32) {
        tcg_gen_op2(&tcg_ctx, INDEX_op_extrh_i64_i32,
                    GET_TCGV_I32(ret), GET_TCGV_I64(arg));
    } else {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_shri_i64(t, arg, 32);
        tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(t)));
        tcg_temp_free_i64(t);
    }
}
void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), arg);
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else {
        tcg_gen_op2(&tcg_ctx, INDEX_op_extu_i32_i64,
                    GET_TCGV_I64(ret), GET_TCGV_I32(arg));
    }
}
void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), arg);
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else {
        tcg_gen_op2(&tcg_ctx, INDEX_op_ext_i32_i64,
                    GET_TCGV_I64(ret), GET_TCGV_I32(arg));
    }
}
void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
{
    TCGv_i64 tmp;

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(dest), low);
        tcg_gen_mov_i32(TCGV_HIGH(dest), high);
        return;
    }

    tmp = tcg_temp_new_i64();
    /* These extensions are only needed for type correctness.
       We may be able to do better given target specific information.  */
    tcg_gen_extu_i32_i64(tmp, high);
    tcg_gen_extu_i32_i64(dest, low);
    /* If deposit is available, use it.  Otherwise use the extra
       knowledge that we have of the zero-extensions above.  */
    if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) {
        tcg_gen_deposit_i64(dest, dest, tmp, 32, 32);
    } else {
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_or_i64(dest, dest, tmp);
    }
    tcg_temp_free_i64(tmp);
}
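
/*
 * For illustration: concat(low = 0x89abcdef, high = 0x01234567) must
 * produce 0x0123456789abcdef.  The deposit form writes the high word
 * into bits [63:32] in one op; the fallback computes the equivalent of
 * (uint64_t)low | ((uint64_t)high << 32), made safe by the explicit
 * zero-extensions above.
 */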
void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(lo, TCGV_LOW(arg));
        tcg_gen_mov_i32(hi, TCGV_HIGH(arg));
    } else {
        tcg_gen_extrl_i64_i32(lo, arg);
        tcg_gen_extrh_i64_i32(hi, arg);
    }
}
void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
{
    tcg_gen_ext32u_i64(lo, arg);
    tcg_gen_shri_i64(hi, arg, 32);
}
/* QEMU specific operations.  */

void tcg_gen_goto_tb(unsigned idx)
{
    /* We only support two chained exits.  */
    tcg_debug_assert(idx <= 1);
#ifdef CONFIG_DEBUG_TCG
    /* Verify that we haven't seen this numbered exit before.  */
    tcg_debug_assert((tcg_ctx.goto_tb_issue_mask & (1 << idx)) == 0);
    tcg_ctx.goto_tb_issue_mask |= 1 << idx;
#endif
    tcg_gen_op1i(INDEX_op_goto_tb, idx);
}
void tcg_gen_lookup_and_goto_ptr(TCGv addr)
{
    if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        TCGv_ptr ptr = tcg_temp_new_ptr();
        gen_helper_lookup_tb_ptr(ptr, tcg_ctx.tcg_env, addr);
        tcg_gen_op1i(INDEX_op_goto_ptr, GET_TCGV_PTR(ptr));
        tcg_temp_free_ptr(ptr);
    } else {
        tcg_gen_exit_tb(0);
    }
}
static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
{
    /* Trigger the asserts within as early as possible.  */
    (void)get_alignment_bits(op);

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (!is64) {
            tcg_abort();
        }
        break;
    }
    if (st) {
        op &= ~MO_SIGN;
    }
    return op;
}
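
/*
 * Canonicalization in brief: a byte access has no byte order, so
 * MO_BSWAP is stripped for MO_8; stores never need MO_SIGN; and for
 * 32-bit operations a 32-bit access fills the whole register, so
 * MO_SIGN is dropped there as well, while a 64-bit access from a
 * 32-bit op is rejected outright.
 */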
static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
                         TCGMemOp memop, TCGArg idx)
{
    TCGMemOpIdx oi = make_memop_idx(memop, idx);
#if TARGET_LONG_BITS == 32
    tcg_gen_op3i_i32(opc, val, addr, oi);
#else
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi);
    } else {
        tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(val), GET_TCGV_I64(addr), oi);
    }
#endif
}
static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
                         TCGMemOp memop, TCGArg idx)
{
    TCGMemOpIdx oi = make_memop_idx(memop, idx);
#if TARGET_LONG_BITS == 32
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
    } else {
        tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(val), GET_TCGV_I32(addr), oi);
    }
#else
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val),
                         TCGV_LOW(addr), TCGV_HIGH(addr), oi);
    } else {
        tcg_gen_op3i_i64(opc, val, addr, oi);
    }
#endif
}
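
/*
 * The #if/else ladders above cover the four combinations of 32- or
 * 64-bit guest address with 32- or 64-bit host registers: any value or
 * address wider than a host register is passed as a low/high pair, so
 * the emitted opcode carries between three operands (val, addr, oi)
 * and five (val lo/hi, addr lo/hi, oi).
 */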
static void tcg_gen_req_mo(TCGBar type)
{
#ifdef TCG_GUEST_DEFAULT_MO
    type &= TCG_GUEST_DEFAULT_MO;
#endif
    type &= ~TCG_TARGET_DEFAULT_MO;
    if (type) {
        tcg_gen_mb(type | TCG_BAR_SC);
    }
}
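
/*
 * Worked example (assuming a guest that requires full ordering and a
 * host whose TCG_TARGET_DEFAULT_MO already includes TCG_MO_LD_LD): a
 * request for TCG_MO_LD_LD | TCG_MO_ST_LD is left intact by the guest
 * mask, then reduced to TCG_MO_ST_LD, and only that residue is
 * emitted as an mb opcode.
 */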
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    memop = tcg_canonicalize_memop(memop, 0, 0);
    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
                               addr, trace_mem_get_info(memop, 0));
    gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
}
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 0, 1);
    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
                               addr, trace_mem_get_info(memop, 1));
    gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
}
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    memop = tcg_canonicalize_memop(memop, 1, 0);
    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
                               addr, trace_mem_get_info(memop, 0));
    gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
}
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    memop = tcg_canonicalize_memop(memop, 1, 1);
    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
                               addr, trace_mem_get_info(memop, 1));
    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
}
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    default:
        tcg_gen_mov_i32(ret, val);
        break;
    }
}
static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    default:
        tcg_gen_mov_i64(ret, val);
        break;
    }
}
#ifdef CONFIG_SOFTMMU
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
                                  TCGv_i64, TCGv_i32);
#else
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
#endif
#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif
static void * const table_cmpxchg[16] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
};
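
/*
 * The table is indexed by memop & (MO_SIZE | MO_BSWAP), so each
 * width/endianness combination selects its own helper.  The 64-bit
 * entries exist only under CONFIG_ATOMIC64, hence the WITH_ATOMIC64()
 * wrapper; a missing entry is caught by the tcg_debug_assert in the
 * callers below.
 */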
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
                                TCGv_i32 newv, TCGArg idx, TCGMemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 0, 0);

    if (!parallel_cpus) {
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();

        tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

        tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
        tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
        tcg_gen_qemu_st_i32(t2, addr, idx, memop);
        tcg_temp_free_i32(t2);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i32(retv, t1, memop);
        } else {
            tcg_gen_mov_i32(retv, t1);
        }
        tcg_temp_free_i32(t1);
    } else {
        gen_atomic_cx_i32 gen;

        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
            gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
            tcg_temp_free_i32(oi);
        }
#else
        gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
#endif

        if (memop & MO_SIGN) {
            tcg_gen_ext_i32(retv, retv, memop);
        }
    }
}
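
/*
 * The !parallel_cpus path above is cmpxchg spelled out in plain ops:
 * load the old value, movcond-select the new value only when it equals
 * the comparand, store the selection back, and return the (possibly
 * re-sign-extended) old value.  With no other vCPU running
 * concurrently, the sequence behaves atomically.
 */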
void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
                                TCGv_i64 newv, TCGArg idx, TCGMemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if (!parallel_cpus) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

        tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
        tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
        tcg_gen_qemu_st_i64(t2, addr, idx, memop);
        tcg_temp_free_i64(t2);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, t1, memop);
        } else {
            tcg_gen_mov_i64(retv, t1);
        }
        tcg_temp_free_i64(t1);
    } else if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
        gen_atomic_cx_i64 gen;

        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
            gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
            tcg_temp_free_i32(oi);
        }
#else
        gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
#endif
#else
        gen_helper_exit_atomic(tcg_ctx.tcg_env);
        /* Produce a result, so that we have a well-formed opcode stream
           with respect to uses of the result in the (dead) code following.  */
        tcg_gen_movi_i64(retv, 0);
#endif /* CONFIG_ATOMIC64 */
    } else {
        TCGv_i32 c32 = tcg_temp_new_i32();
        TCGv_i32 n32 = tcg_temp_new_i32();
        TCGv_i32 r32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}
static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                                TCGArg idx, TCGMemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
    gen(t2, t1, val);
    tcg_gen_qemu_st_i32(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                             TCGArg idx, TCGMemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
    {
        TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
        gen(ret, tcg_ctx.tcg_env, addr, val, oi);
        tcg_temp_free_i32(oi);
    }
#else
    gen(ret, tcg_ctx.tcg_env, addr, val);
#endif

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}
static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
                                TCGArg idx, TCGMemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
    gen(t2, t1, val);
    tcg_gen_qemu_st_i64(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
                             TCGArg idx, TCGMemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
        gen_atomic_op_i64 gen;

        gen = table[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
            gen(ret, tcg_ctx.tcg_env, addr, val, oi);
            tcg_temp_free_i32(oi);
        }
#else
        gen(ret, tcg_ctx.tcg_env, addr, val);
#endif
#else
        gen_helper_exit_atomic(tcg_ctx.tcg_env);
        /* Produce a result, so that we have a well-formed opcode stream
           with respect to uses of the result in the (dead) code following.  */
        tcg_gen_movi_i64(ret, 0);
#endif /* CONFIG_ATOMIC64 */
    } else {
        TCGv_i32 v32 = tcg_temp_new_i32();
        TCGv_i32 r32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}
#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[16] = {                                \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32                                        \
    (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
{                                                                       \
    if (parallel_cpus) {                                                \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64                                        \
    (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
{                                                                       \
    if (parallel_cpus) {                                                \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
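
/*
 * The NEW parameter selects which value the operation returns in the
 * serial case: 0 returns the old memory contents (fetch_add and
 * friends), 1 the updated value (add_fetch and friends).  E.g. with
 * *addr == 5, fetch_add of 3 leaves 8 in memory and returns 5, while
 * add_fetch returns 8.
 */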
static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER
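
/*
 * Exchange falls out of the same template: mov2 ignores the loaded old
 * value and simply produces VAL as the value to store, and NEW == 0
 * makes the helper return the old contents, which together implement
 * an atomic xchg.
 */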