2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "qemu/osdep.h"
26 #include "qemu-common.h"
28 #include "exec/exec-all.h"
31 #include "trace-tcg.h"
32 #include "trace/mem.h"
/* Reduce the number of ifdefs below.  This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate.  On 64-bit hosts the half-register
   accessors must never survive to link time, so redirect them to
   deliberately undefined symbols.  */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW  TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif
44 /* Note that this is optimized for sequential allocation during translate.
45 Up to and including filling in the forward link immediately. We'll do
46 proper termination of the end of the list after we finish translation. */
48 static void tcg_emit_op(TCGContext
*ctx
, TCGOpcode opc
, int args
)
50 int oi
= ctx
->gen_next_op_idx
;
54 tcg_debug_assert(oi
< OPC_BUF_SIZE
);
55 ctx
->gen_op_buf
[0].prev
= oi
;
56 ctx
->gen_next_op_idx
= ni
;
58 ctx
->gen_op_buf
[oi
] = (TCGOp
){
66 void tcg_gen_op1(TCGContext
*ctx
, TCGOpcode opc
, TCGArg a1
)
68 int pi
= ctx
->gen_next_parm_idx
;
70 tcg_debug_assert(pi
+ 1 <= OPPARAM_BUF_SIZE
);
71 ctx
->gen_next_parm_idx
= pi
+ 1;
72 ctx
->gen_opparam_buf
[pi
] = a1
;
74 tcg_emit_op(ctx
, opc
, pi
);
77 void tcg_gen_op2(TCGContext
*ctx
, TCGOpcode opc
, TCGArg a1
, TCGArg a2
)
79 int pi
= ctx
->gen_next_parm_idx
;
81 tcg_debug_assert(pi
+ 2 <= OPPARAM_BUF_SIZE
);
82 ctx
->gen_next_parm_idx
= pi
+ 2;
83 ctx
->gen_opparam_buf
[pi
+ 0] = a1
;
84 ctx
->gen_opparam_buf
[pi
+ 1] = a2
;
86 tcg_emit_op(ctx
, opc
, pi
);
89 void tcg_gen_op3(TCGContext
*ctx
, TCGOpcode opc
, TCGArg a1
,
92 int pi
= ctx
->gen_next_parm_idx
;
94 tcg_debug_assert(pi
+ 3 <= OPPARAM_BUF_SIZE
);
95 ctx
->gen_next_parm_idx
= pi
+ 3;
96 ctx
->gen_opparam_buf
[pi
+ 0] = a1
;
97 ctx
->gen_opparam_buf
[pi
+ 1] = a2
;
98 ctx
->gen_opparam_buf
[pi
+ 2] = a3
;
100 tcg_emit_op(ctx
, opc
, pi
);
103 void tcg_gen_op4(TCGContext
*ctx
, TCGOpcode opc
, TCGArg a1
,
104 TCGArg a2
, TCGArg a3
, TCGArg a4
)
106 int pi
= ctx
->gen_next_parm_idx
;
108 tcg_debug_assert(pi
+ 4 <= OPPARAM_BUF_SIZE
);
109 ctx
->gen_next_parm_idx
= pi
+ 4;
110 ctx
->gen_opparam_buf
[pi
+ 0] = a1
;
111 ctx
->gen_opparam_buf
[pi
+ 1] = a2
;
112 ctx
->gen_opparam_buf
[pi
+ 2] = a3
;
113 ctx
->gen_opparam_buf
[pi
+ 3] = a4
;
115 tcg_emit_op(ctx
, opc
, pi
);
118 void tcg_gen_op5(TCGContext
*ctx
, TCGOpcode opc
, TCGArg a1
,
119 TCGArg a2
, TCGArg a3
, TCGArg a4
, TCGArg a5
)
121 int pi
= ctx
->gen_next_parm_idx
;
123 tcg_debug_assert(pi
+ 5 <= OPPARAM_BUF_SIZE
);
124 ctx
->gen_next_parm_idx
= pi
+ 5;
125 ctx
->gen_opparam_buf
[pi
+ 0] = a1
;
126 ctx
->gen_opparam_buf
[pi
+ 1] = a2
;
127 ctx
->gen_opparam_buf
[pi
+ 2] = a3
;
128 ctx
->gen_opparam_buf
[pi
+ 3] = a4
;
129 ctx
->gen_opparam_buf
[pi
+ 4] = a5
;
131 tcg_emit_op(ctx
, opc
, pi
);
134 void tcg_gen_op6(TCGContext
*ctx
, TCGOpcode opc
, TCGArg a1
, TCGArg a2
,
135 TCGArg a3
, TCGArg a4
, TCGArg a5
, TCGArg a6
)
137 int pi
= ctx
->gen_next_parm_idx
;
139 tcg_debug_assert(pi
+ 6 <= OPPARAM_BUF_SIZE
);
140 ctx
->gen_next_parm_idx
= pi
+ 6;
141 ctx
->gen_opparam_buf
[pi
+ 0] = a1
;
142 ctx
->gen_opparam_buf
[pi
+ 1] = a2
;
143 ctx
->gen_opparam_buf
[pi
+ 2] = a3
;
144 ctx
->gen_opparam_buf
[pi
+ 3] = a4
;
145 ctx
->gen_opparam_buf
[pi
+ 4] = a5
;
146 ctx
->gen_opparam_buf
[pi
+ 5] = a6
;
148 tcg_emit_op(ctx
, opc
, pi
);
151 void tcg_gen_mb(TCGBar mb_type
)
154 tcg_gen_op1(&tcg_ctx
, INDEX_op_mb
, mb_type
);
160 void tcg_gen_addi_i32(TCGv_i32 ret
, TCGv_i32 arg1
, int32_t arg2
)
162 /* some cases can be optimized here */
164 tcg_gen_mov_i32(ret
, arg1
);
166 TCGv_i32 t0
= tcg_const_i32(arg2
);
167 tcg_gen_add_i32(ret
, arg1
, t0
);
168 tcg_temp_free_i32(t0
);
172 void tcg_gen_subfi_i32(TCGv_i32 ret
, int32_t arg1
, TCGv_i32 arg2
)
174 if (arg1
== 0 && TCG_TARGET_HAS_neg_i32
) {
175 /* Don't recurse with tcg_gen_neg_i32. */
176 tcg_gen_op2_i32(INDEX_op_neg_i32
, ret
, arg2
);
178 TCGv_i32 t0
= tcg_const_i32(arg1
);
179 tcg_gen_sub_i32(ret
, t0
, arg2
);
180 tcg_temp_free_i32(t0
);
184 void tcg_gen_subi_i32(TCGv_i32 ret
, TCGv_i32 arg1
, int32_t arg2
)
186 /* some cases can be optimized here */
188 tcg_gen_mov_i32(ret
, arg1
);
190 TCGv_i32 t0
= tcg_const_i32(arg2
);
191 tcg_gen_sub_i32(ret
, arg1
, t0
);
192 tcg_temp_free_i32(t0
);
196 void tcg_gen_andi_i32(TCGv_i32 ret
, TCGv_i32 arg1
, uint32_t arg2
)
199 /* Some cases can be optimized here. */
202 tcg_gen_movi_i32(ret
, 0);
205 tcg_gen_mov_i32(ret
, arg1
);
208 /* Don't recurse with tcg_gen_ext8u_i32. */
209 if (TCG_TARGET_HAS_ext8u_i32
) {
210 tcg_gen_op2_i32(INDEX_op_ext8u_i32
, ret
, arg1
);
215 if (TCG_TARGET_HAS_ext16u_i32
) {
216 tcg_gen_op2_i32(INDEX_op_ext16u_i32
, ret
, arg1
);
221 t0
= tcg_const_i32(arg2
);
222 tcg_gen_and_i32(ret
, arg1
, t0
);
223 tcg_temp_free_i32(t0
);
226 void tcg_gen_ori_i32(TCGv_i32 ret
, TCGv_i32 arg1
, int32_t arg2
)
228 /* Some cases can be optimized here. */
230 tcg_gen_movi_i32(ret
, -1);
231 } else if (arg2
== 0) {
232 tcg_gen_mov_i32(ret
, arg1
);
234 TCGv_i32 t0
= tcg_const_i32(arg2
);
235 tcg_gen_or_i32(ret
, arg1
, t0
);
236 tcg_temp_free_i32(t0
);
240 void tcg_gen_xori_i32(TCGv_i32 ret
, TCGv_i32 arg1
, int32_t arg2
)
242 /* Some cases can be optimized here. */
244 tcg_gen_mov_i32(ret
, arg1
);
245 } else if (arg2
== -1 && TCG_TARGET_HAS_not_i32
) {
246 /* Don't recurse with tcg_gen_not_i32. */
247 tcg_gen_op2_i32(INDEX_op_not_i32
, ret
, arg1
);
249 TCGv_i32 t0
= tcg_const_i32(arg2
);
250 tcg_gen_xor_i32(ret
, arg1
, t0
);
251 tcg_temp_free_i32(t0
);
255 void tcg_gen_shli_i32(TCGv_i32 ret
, TCGv_i32 arg1
, unsigned arg2
)
257 tcg_debug_assert(arg2
< 32);
259 tcg_gen_mov_i32(ret
, arg1
);
261 TCGv_i32 t0
= tcg_const_i32(arg2
);
262 tcg_gen_shl_i32(ret
, arg1
, t0
);
263 tcg_temp_free_i32(t0
);
267 void tcg_gen_shri_i32(TCGv_i32 ret
, TCGv_i32 arg1
, unsigned arg2
)
269 tcg_debug_assert(arg2
< 32);
271 tcg_gen_mov_i32(ret
, arg1
);
273 TCGv_i32 t0
= tcg_const_i32(arg2
);
274 tcg_gen_shr_i32(ret
, arg1
, t0
);
275 tcg_temp_free_i32(t0
);
279 void tcg_gen_sari_i32(TCGv_i32 ret
, TCGv_i32 arg1
, unsigned arg2
)
281 tcg_debug_assert(arg2
< 32);
283 tcg_gen_mov_i32(ret
, arg1
);
285 TCGv_i32 t0
= tcg_const_i32(arg2
);
286 tcg_gen_sar_i32(ret
, arg1
, t0
);
287 tcg_temp_free_i32(t0
);
291 void tcg_gen_brcond_i32(TCGCond cond
, TCGv_i32 arg1
, TCGv_i32 arg2
, TCGLabel
*l
)
293 if (cond
== TCG_COND_ALWAYS
) {
295 } else if (cond
!= TCG_COND_NEVER
) {
296 tcg_gen_op4ii_i32(INDEX_op_brcond_i32
, arg1
, arg2
, cond
, label_arg(l
));
300 void tcg_gen_brcondi_i32(TCGCond cond
, TCGv_i32 arg1
, int32_t arg2
, TCGLabel
*l
)
302 if (cond
== TCG_COND_ALWAYS
) {
304 } else if (cond
!= TCG_COND_NEVER
) {
305 TCGv_i32 t0
= tcg_const_i32(arg2
);
306 tcg_gen_brcond_i32(cond
, arg1
, t0
, l
);
307 tcg_temp_free_i32(t0
);
311 void tcg_gen_setcond_i32(TCGCond cond
, TCGv_i32 ret
,
312 TCGv_i32 arg1
, TCGv_i32 arg2
)
314 if (cond
== TCG_COND_ALWAYS
) {
315 tcg_gen_movi_i32(ret
, 1);
316 } else if (cond
== TCG_COND_NEVER
) {
317 tcg_gen_movi_i32(ret
, 0);
319 tcg_gen_op4i_i32(INDEX_op_setcond_i32
, ret
, arg1
, arg2
, cond
);
323 void tcg_gen_setcondi_i32(TCGCond cond
, TCGv_i32 ret
,
324 TCGv_i32 arg1
, int32_t arg2
)
326 TCGv_i32 t0
= tcg_const_i32(arg2
);
327 tcg_gen_setcond_i32(cond
, ret
, arg1
, t0
);
328 tcg_temp_free_i32(t0
);
331 void tcg_gen_muli_i32(TCGv_i32 ret
, TCGv_i32 arg1
, int32_t arg2
)
333 TCGv_i32 t0
= tcg_const_i32(arg2
);
334 tcg_gen_mul_i32(ret
, arg1
, t0
);
335 tcg_temp_free_i32(t0
);
338 void tcg_gen_div_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
340 if (TCG_TARGET_HAS_div_i32
) {
341 tcg_gen_op3_i32(INDEX_op_div_i32
, ret
, arg1
, arg2
);
342 } else if (TCG_TARGET_HAS_div2_i32
) {
343 TCGv_i32 t0
= tcg_temp_new_i32();
344 tcg_gen_sari_i32(t0
, arg1
, 31);
345 tcg_gen_op5_i32(INDEX_op_div2_i32
, ret
, t0
, arg1
, t0
, arg2
);
346 tcg_temp_free_i32(t0
);
348 gen_helper_div_i32(ret
, arg1
, arg2
);
352 void tcg_gen_rem_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
354 if (TCG_TARGET_HAS_rem_i32
) {
355 tcg_gen_op3_i32(INDEX_op_rem_i32
, ret
, arg1
, arg2
);
356 } else if (TCG_TARGET_HAS_div_i32
) {
357 TCGv_i32 t0
= tcg_temp_new_i32();
358 tcg_gen_op3_i32(INDEX_op_div_i32
, t0
, arg1
, arg2
);
359 tcg_gen_mul_i32(t0
, t0
, arg2
);
360 tcg_gen_sub_i32(ret
, arg1
, t0
);
361 tcg_temp_free_i32(t0
);
362 } else if (TCG_TARGET_HAS_div2_i32
) {
363 TCGv_i32 t0
= tcg_temp_new_i32();
364 tcg_gen_sari_i32(t0
, arg1
, 31);
365 tcg_gen_op5_i32(INDEX_op_div2_i32
, t0
, ret
, arg1
, t0
, arg2
);
366 tcg_temp_free_i32(t0
);
368 gen_helper_rem_i32(ret
, arg1
, arg2
);
372 void tcg_gen_divu_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
374 if (TCG_TARGET_HAS_div_i32
) {
375 tcg_gen_op3_i32(INDEX_op_divu_i32
, ret
, arg1
, arg2
);
376 } else if (TCG_TARGET_HAS_div2_i32
) {
377 TCGv_i32 t0
= tcg_temp_new_i32();
378 tcg_gen_movi_i32(t0
, 0);
379 tcg_gen_op5_i32(INDEX_op_divu2_i32
, ret
, t0
, arg1
, t0
, arg2
);
380 tcg_temp_free_i32(t0
);
382 gen_helper_divu_i32(ret
, arg1
, arg2
);
386 void tcg_gen_remu_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
388 if (TCG_TARGET_HAS_rem_i32
) {
389 tcg_gen_op3_i32(INDEX_op_remu_i32
, ret
, arg1
, arg2
);
390 } else if (TCG_TARGET_HAS_div_i32
) {
391 TCGv_i32 t0
= tcg_temp_new_i32();
392 tcg_gen_op3_i32(INDEX_op_divu_i32
, t0
, arg1
, arg2
);
393 tcg_gen_mul_i32(t0
, t0
, arg2
);
394 tcg_gen_sub_i32(ret
, arg1
, t0
);
395 tcg_temp_free_i32(t0
);
396 } else if (TCG_TARGET_HAS_div2_i32
) {
397 TCGv_i32 t0
= tcg_temp_new_i32();
398 tcg_gen_movi_i32(t0
, 0);
399 tcg_gen_op5_i32(INDEX_op_divu2_i32
, t0
, ret
, arg1
, t0
, arg2
);
400 tcg_temp_free_i32(t0
);
402 gen_helper_remu_i32(ret
, arg1
, arg2
);
406 void tcg_gen_andc_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
408 if (TCG_TARGET_HAS_andc_i32
) {
409 tcg_gen_op3_i32(INDEX_op_andc_i32
, ret
, arg1
, arg2
);
411 TCGv_i32 t0
= tcg_temp_new_i32();
412 tcg_gen_not_i32(t0
, arg2
);
413 tcg_gen_and_i32(ret
, arg1
, t0
);
414 tcg_temp_free_i32(t0
);
418 void tcg_gen_eqv_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
420 if (TCG_TARGET_HAS_eqv_i32
) {
421 tcg_gen_op3_i32(INDEX_op_eqv_i32
, ret
, arg1
, arg2
);
423 tcg_gen_xor_i32(ret
, arg1
, arg2
);
424 tcg_gen_not_i32(ret
, ret
);
428 void tcg_gen_nand_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
430 if (TCG_TARGET_HAS_nand_i32
) {
431 tcg_gen_op3_i32(INDEX_op_nand_i32
, ret
, arg1
, arg2
);
433 tcg_gen_and_i32(ret
, arg1
, arg2
);
434 tcg_gen_not_i32(ret
, ret
);
438 void tcg_gen_nor_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
440 if (TCG_TARGET_HAS_nor_i32
) {
441 tcg_gen_op3_i32(INDEX_op_nor_i32
, ret
, arg1
, arg2
);
443 tcg_gen_or_i32(ret
, arg1
, arg2
);
444 tcg_gen_not_i32(ret
, ret
);
448 void tcg_gen_orc_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
450 if (TCG_TARGET_HAS_orc_i32
) {
451 tcg_gen_op3_i32(INDEX_op_orc_i32
, ret
, arg1
, arg2
);
453 TCGv_i32 t0
= tcg_temp_new_i32();
454 tcg_gen_not_i32(t0
, arg2
);
455 tcg_gen_or_i32(ret
, arg1
, t0
);
456 tcg_temp_free_i32(t0
);
460 void tcg_gen_clz_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
462 if (TCG_TARGET_HAS_clz_i32
) {
463 tcg_gen_op3_i32(INDEX_op_clz_i32
, ret
, arg1
, arg2
);
464 } else if (TCG_TARGET_HAS_clz_i64
) {
465 TCGv_i64 t1
= tcg_temp_new_i64();
466 TCGv_i64 t2
= tcg_temp_new_i64();
467 tcg_gen_extu_i32_i64(t1
, arg1
);
468 tcg_gen_extu_i32_i64(t2
, arg2
);
469 tcg_gen_addi_i64(t2
, t2
, 32);
470 tcg_gen_clz_i64(t1
, t1
, t2
);
471 tcg_gen_extrl_i64_i32(ret
, t1
);
472 tcg_temp_free_i64(t1
);
473 tcg_temp_free_i64(t2
);
474 tcg_gen_subi_i32(ret
, ret
, 32);
476 gen_helper_clz_i32(ret
, arg1
, arg2
);
480 void tcg_gen_clzi_i32(TCGv_i32 ret
, TCGv_i32 arg1
, uint32_t arg2
)
482 TCGv_i32 t
= tcg_const_i32(arg2
);
483 tcg_gen_clz_i32(ret
, arg1
, t
);
484 tcg_temp_free_i32(t
);
487 void tcg_gen_ctz_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
489 if (TCG_TARGET_HAS_ctz_i32
) {
490 tcg_gen_op3_i32(INDEX_op_ctz_i32
, ret
, arg1
, arg2
);
491 } else if (TCG_TARGET_HAS_ctz_i64
) {
492 TCGv_i64 t1
= tcg_temp_new_i64();
493 TCGv_i64 t2
= tcg_temp_new_i64();
494 tcg_gen_extu_i32_i64(t1
, arg1
);
495 tcg_gen_extu_i32_i64(t2
, arg2
);
496 tcg_gen_ctz_i64(t1
, t1
, t2
);
497 tcg_gen_extrl_i64_i32(ret
, t1
);
498 tcg_temp_free_i64(t1
);
499 tcg_temp_free_i64(t2
);
500 } else if (TCG_TARGET_HAS_ctpop_i32
501 || TCG_TARGET_HAS_ctpop_i64
502 || TCG_TARGET_HAS_clz_i32
503 || TCG_TARGET_HAS_clz_i64
) {
504 TCGv_i32 z
, t
= tcg_temp_new_i32();
506 if (TCG_TARGET_HAS_ctpop_i32
|| TCG_TARGET_HAS_ctpop_i64
) {
507 tcg_gen_subi_i32(t
, arg1
, 1);
508 tcg_gen_andc_i32(t
, t
, arg1
);
509 tcg_gen_ctpop_i32(t
, t
);
511 /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */
512 tcg_gen_neg_i32(t
, arg1
);
513 tcg_gen_and_i32(t
, t
, arg1
);
514 tcg_gen_clzi_i32(t
, t
, 32);
515 tcg_gen_xori_i32(t
, t
, 31);
517 z
= tcg_const_i32(0);
518 tcg_gen_movcond_i32(TCG_COND_EQ
, ret
, arg1
, z
, arg2
, t
);
519 tcg_temp_free_i32(t
);
520 tcg_temp_free_i32(z
);
522 gen_helper_ctz_i32(ret
, arg1
, arg2
);
526 void tcg_gen_ctzi_i32(TCGv_i32 ret
, TCGv_i32 arg1
, uint32_t arg2
)
528 if (!TCG_TARGET_HAS_ctz_i32
&& TCG_TARGET_HAS_ctpop_i32
&& arg2
== 32) {
529 /* This equivalence has the advantage of not requiring a fixup. */
530 TCGv_i32 t
= tcg_temp_new_i32();
531 tcg_gen_subi_i32(t
, arg1
, 1);
532 tcg_gen_andc_i32(t
, t
, arg1
);
533 tcg_gen_ctpop_i32(ret
, t
);
534 tcg_temp_free_i32(t
);
536 TCGv_i32 t
= tcg_const_i32(arg2
);
537 tcg_gen_ctz_i32(ret
, arg1
, t
);
538 tcg_temp_free_i32(t
);
542 void tcg_gen_clrsb_i32(TCGv_i32 ret
, TCGv_i32 arg
)
544 if (TCG_TARGET_HAS_clz_i32
) {
545 TCGv_i32 t
= tcg_temp_new_i32();
546 tcg_gen_sari_i32(t
, arg
, 31);
547 tcg_gen_xor_i32(t
, t
, arg
);
548 tcg_gen_clzi_i32(t
, t
, 32);
549 tcg_gen_subi_i32(ret
, t
, 1);
550 tcg_temp_free_i32(t
);
552 gen_helper_clrsb_i32(ret
, arg
);
556 void tcg_gen_ctpop_i32(TCGv_i32 ret
, TCGv_i32 arg1
)
558 if (TCG_TARGET_HAS_ctpop_i32
) {
559 tcg_gen_op2_i32(INDEX_op_ctpop_i32
, ret
, arg1
);
560 } else if (TCG_TARGET_HAS_ctpop_i64
) {
561 TCGv_i64 t
= tcg_temp_new_i64();
562 tcg_gen_extu_i32_i64(t
, arg1
);
563 tcg_gen_ctpop_i64(t
, t
);
564 tcg_gen_extrl_i64_i32(ret
, t
);
565 tcg_temp_free_i64(t
);
567 gen_helper_ctpop_i32(ret
, arg1
);
571 void tcg_gen_rotl_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
573 if (TCG_TARGET_HAS_rot_i32
) {
574 tcg_gen_op3_i32(INDEX_op_rotl_i32
, ret
, arg1
, arg2
);
578 t0
= tcg_temp_new_i32();
579 t1
= tcg_temp_new_i32();
580 tcg_gen_shl_i32(t0
, arg1
, arg2
);
581 tcg_gen_subfi_i32(t1
, 32, arg2
);
582 tcg_gen_shr_i32(t1
, arg1
, t1
);
583 tcg_gen_or_i32(ret
, t0
, t1
);
584 tcg_temp_free_i32(t0
);
585 tcg_temp_free_i32(t1
);
589 void tcg_gen_rotli_i32(TCGv_i32 ret
, TCGv_i32 arg1
, unsigned arg2
)
591 tcg_debug_assert(arg2
< 32);
592 /* some cases can be optimized here */
594 tcg_gen_mov_i32(ret
, arg1
);
595 } else if (TCG_TARGET_HAS_rot_i32
) {
596 TCGv_i32 t0
= tcg_const_i32(arg2
);
597 tcg_gen_rotl_i32(ret
, arg1
, t0
);
598 tcg_temp_free_i32(t0
);
601 t0
= tcg_temp_new_i32();
602 t1
= tcg_temp_new_i32();
603 tcg_gen_shli_i32(t0
, arg1
, arg2
);
604 tcg_gen_shri_i32(t1
, arg1
, 32 - arg2
);
605 tcg_gen_or_i32(ret
, t0
, t1
);
606 tcg_temp_free_i32(t0
);
607 tcg_temp_free_i32(t1
);
611 void tcg_gen_rotr_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
)
613 if (TCG_TARGET_HAS_rot_i32
) {
614 tcg_gen_op3_i32(INDEX_op_rotr_i32
, ret
, arg1
, arg2
);
618 t0
= tcg_temp_new_i32();
619 t1
= tcg_temp_new_i32();
620 tcg_gen_shr_i32(t0
, arg1
, arg2
);
621 tcg_gen_subfi_i32(t1
, 32, arg2
);
622 tcg_gen_shl_i32(t1
, arg1
, t1
);
623 tcg_gen_or_i32(ret
, t0
, t1
);
624 tcg_temp_free_i32(t0
);
625 tcg_temp_free_i32(t1
);
629 void tcg_gen_rotri_i32(TCGv_i32 ret
, TCGv_i32 arg1
, unsigned arg2
)
631 tcg_debug_assert(arg2
< 32);
632 /* some cases can be optimized here */
634 tcg_gen_mov_i32(ret
, arg1
);
636 tcg_gen_rotli_i32(ret
, arg1
, 32 - arg2
);
640 void tcg_gen_deposit_i32(TCGv_i32 ret
, TCGv_i32 arg1
, TCGv_i32 arg2
,
641 unsigned int ofs
, unsigned int len
)
646 tcg_debug_assert(ofs
< 32);
647 tcg_debug_assert(len
> 0);
648 tcg_debug_assert(len
<= 32);
649 tcg_debug_assert(ofs
+ len
<= 32);
652 tcg_gen_mov_i32(ret
, arg2
);
655 if (TCG_TARGET_HAS_deposit_i32
&& TCG_TARGET_deposit_i32_valid(ofs
, len
)) {
656 tcg_gen_op5ii_i32(INDEX_op_deposit_i32
, ret
, arg1
, arg2
, ofs
, len
);
660 mask
= (1u << len
) - 1;
661 t1
= tcg_temp_new_i32();
663 if (ofs
+ len
< 32) {
664 tcg_gen_andi_i32(t1
, arg2
, mask
);
665 tcg_gen_shli_i32(t1
, t1
, ofs
);
667 tcg_gen_shli_i32(t1
, arg2
, ofs
);
669 tcg_gen_andi_i32(ret
, arg1
, ~(mask
<< ofs
));
670 tcg_gen_or_i32(ret
, ret
, t1
);
672 tcg_temp_free_i32(t1
);
675 void tcg_gen_deposit_z_i32(TCGv_i32 ret
, TCGv_i32 arg
,
676 unsigned int ofs
, unsigned int len
)
678 tcg_debug_assert(ofs
< 32);
679 tcg_debug_assert(len
> 0);
680 tcg_debug_assert(len
<= 32);
681 tcg_debug_assert(ofs
+ len
<= 32);
683 if (ofs
+ len
== 32) {
684 tcg_gen_shli_i32(ret
, arg
, ofs
);
685 } else if (ofs
== 0) {
686 tcg_gen_andi_i32(ret
, arg
, (1u << len
) - 1);
687 } else if (TCG_TARGET_HAS_deposit_i32
688 && TCG_TARGET_deposit_i32_valid(ofs
, len
)) {
689 TCGv_i32 zero
= tcg_const_i32(0);
690 tcg_gen_op5ii_i32(INDEX_op_deposit_i32
, ret
, zero
, arg
, ofs
, len
);
691 tcg_temp_free_i32(zero
);
693 /* To help two-operand hosts we prefer to zero-extend first,
694 which allows ARG to stay live. */
697 if (TCG_TARGET_HAS_ext16u_i32
) {
698 tcg_gen_ext16u_i32(ret
, arg
);
699 tcg_gen_shli_i32(ret
, ret
, ofs
);
704 if (TCG_TARGET_HAS_ext8u_i32
) {
705 tcg_gen_ext8u_i32(ret
, arg
);
706 tcg_gen_shli_i32(ret
, ret
, ofs
);
711 /* Otherwise prefer zero-extension over AND for code size. */
714 if (TCG_TARGET_HAS_ext16u_i32
) {
715 tcg_gen_shli_i32(ret
, arg
, ofs
);
716 tcg_gen_ext16u_i32(ret
, ret
);
721 if (TCG_TARGET_HAS_ext8u_i32
) {
722 tcg_gen_shli_i32(ret
, arg
, ofs
);
723 tcg_gen_ext8u_i32(ret
, ret
);
728 tcg_gen_andi_i32(ret
, arg
, (1u << len
) - 1);
729 tcg_gen_shli_i32(ret
, ret
, ofs
);
733 void tcg_gen_extract_i32(TCGv_i32 ret
, TCGv_i32 arg
,
734 unsigned int ofs
, unsigned int len
)
736 tcg_debug_assert(ofs
< 32);
737 tcg_debug_assert(len
> 0);
738 tcg_debug_assert(len
<= 32);
739 tcg_debug_assert(ofs
+ len
<= 32);
741 /* Canonicalize certain special cases, even if extract is supported. */
742 if (ofs
+ len
== 32) {
743 tcg_gen_shri_i32(ret
, arg
, 32 - len
);
747 tcg_gen_andi_i32(ret
, arg
, (1u << len
) - 1);
751 if (TCG_TARGET_HAS_extract_i32
752 && TCG_TARGET_extract_i32_valid(ofs
, len
)) {
753 tcg_gen_op4ii_i32(INDEX_op_extract_i32
, ret
, arg
, ofs
, len
);
757 /* Assume that zero-extension, if available, is cheaper than a shift. */
760 if (TCG_TARGET_HAS_ext16u_i32
) {
761 tcg_gen_ext16u_i32(ret
, arg
);
762 tcg_gen_shri_i32(ret
, ret
, ofs
);
767 if (TCG_TARGET_HAS_ext8u_i32
) {
768 tcg_gen_ext8u_i32(ret
, arg
);
769 tcg_gen_shri_i32(ret
, ret
, ofs
);
775 /* ??? Ideally we'd know what values are available for immediate AND.
776 Assume that 8 bits are available, plus the special case of 16,
777 so that we get ext8u, ext16u. */
779 case 1 ... 8: case 16:
780 tcg_gen_shri_i32(ret
, arg
, ofs
);
781 tcg_gen_andi_i32(ret
, ret
, (1u << len
) - 1);
784 tcg_gen_shli_i32(ret
, arg
, 32 - len
- ofs
);
785 tcg_gen_shri_i32(ret
, ret
, 32 - len
);
790 void tcg_gen_sextract_i32(TCGv_i32 ret
, TCGv_i32 arg
,
791 unsigned int ofs
, unsigned int len
)
793 tcg_debug_assert(ofs
< 32);
794 tcg_debug_assert(len
> 0);
795 tcg_debug_assert(len
<= 32);
796 tcg_debug_assert(ofs
+ len
<= 32);
798 /* Canonicalize certain special cases, even if extract is supported. */
799 if (ofs
+ len
== 32) {
800 tcg_gen_sari_i32(ret
, arg
, 32 - len
);
806 tcg_gen_ext16s_i32(ret
, arg
);
809 tcg_gen_ext8s_i32(ret
, arg
);
814 if (TCG_TARGET_HAS_sextract_i32
815 && TCG_TARGET_extract_i32_valid(ofs
, len
)) {
816 tcg_gen_op4ii_i32(INDEX_op_sextract_i32
, ret
, arg
, ofs
, len
);
820 /* Assume that sign-extension, if available, is cheaper than a shift. */
823 if (TCG_TARGET_HAS_ext16s_i32
) {
824 tcg_gen_ext16s_i32(ret
, arg
);
825 tcg_gen_sari_i32(ret
, ret
, ofs
);
830 if (TCG_TARGET_HAS_ext8s_i32
) {
831 tcg_gen_ext8s_i32(ret
, arg
);
832 tcg_gen_sari_i32(ret
, ret
, ofs
);
839 if (TCG_TARGET_HAS_ext16s_i32
) {
840 tcg_gen_shri_i32(ret
, arg
, ofs
);
841 tcg_gen_ext16s_i32(ret
, ret
);
846 if (TCG_TARGET_HAS_ext8s_i32
) {
847 tcg_gen_shri_i32(ret
, arg
, ofs
);
848 tcg_gen_ext8s_i32(ret
, ret
);
854 tcg_gen_shli_i32(ret
, arg
, 32 - len
- ofs
);
855 tcg_gen_sari_i32(ret
, ret
, 32 - len
);
858 void tcg_gen_movcond_i32(TCGCond cond
, TCGv_i32 ret
, TCGv_i32 c1
,
859 TCGv_i32 c2
, TCGv_i32 v1
, TCGv_i32 v2
)
861 if (cond
== TCG_COND_ALWAYS
) {
862 tcg_gen_mov_i32(ret
, v1
);
863 } else if (cond
== TCG_COND_NEVER
) {
864 tcg_gen_mov_i32(ret
, v2
);
865 } else if (TCG_TARGET_HAS_movcond_i32
) {
866 tcg_gen_op6i_i32(INDEX_op_movcond_i32
, ret
, c1
, c2
, v1
, v2
, cond
);
868 TCGv_i32 t0
= tcg_temp_new_i32();
869 TCGv_i32 t1
= tcg_temp_new_i32();
870 tcg_gen_setcond_i32(cond
, t0
, c1
, c2
);
871 tcg_gen_neg_i32(t0
, t0
);
872 tcg_gen_and_i32(t1
, v1
, t0
);
873 tcg_gen_andc_i32(ret
, v2
, t0
);
874 tcg_gen_or_i32(ret
, ret
, t1
);
875 tcg_temp_free_i32(t0
);
876 tcg_temp_free_i32(t1
);
880 void tcg_gen_add2_i32(TCGv_i32 rl
, TCGv_i32 rh
, TCGv_i32 al
,
881 TCGv_i32 ah
, TCGv_i32 bl
, TCGv_i32 bh
)
883 if (TCG_TARGET_HAS_add2_i32
) {
884 tcg_gen_op6_i32(INDEX_op_add2_i32
, rl
, rh
, al
, ah
, bl
, bh
);
886 TCGv_i64 t0
= tcg_temp_new_i64();
887 TCGv_i64 t1
= tcg_temp_new_i64();
888 tcg_gen_concat_i32_i64(t0
, al
, ah
);
889 tcg_gen_concat_i32_i64(t1
, bl
, bh
);
890 tcg_gen_add_i64(t0
, t0
, t1
);
891 tcg_gen_extr_i64_i32(rl
, rh
, t0
);
892 tcg_temp_free_i64(t0
);
893 tcg_temp_free_i64(t1
);
897 void tcg_gen_sub2_i32(TCGv_i32 rl
, TCGv_i32 rh
, TCGv_i32 al
,
898 TCGv_i32 ah
, TCGv_i32 bl
, TCGv_i32 bh
)
900 if (TCG_TARGET_HAS_sub2_i32
) {
901 tcg_gen_op6_i32(INDEX_op_sub2_i32
, rl
, rh
, al
, ah
, bl
, bh
);
903 TCGv_i64 t0
= tcg_temp_new_i64();
904 TCGv_i64 t1
= tcg_temp_new_i64();
905 tcg_gen_concat_i32_i64(t0
, al
, ah
);
906 tcg_gen_concat_i32_i64(t1
, bl
, bh
);
907 tcg_gen_sub_i64(t0
, t0
, t1
);
908 tcg_gen_extr_i64_i32(rl
, rh
, t0
);
909 tcg_temp_free_i64(t0
);
910 tcg_temp_free_i64(t1
);
914 void tcg_gen_mulu2_i32(TCGv_i32 rl
, TCGv_i32 rh
, TCGv_i32 arg1
, TCGv_i32 arg2
)
916 if (TCG_TARGET_HAS_mulu2_i32
) {
917 tcg_gen_op4_i32(INDEX_op_mulu2_i32
, rl
, rh
, arg1
, arg2
);
918 } else if (TCG_TARGET_HAS_muluh_i32
) {
919 TCGv_i32 t
= tcg_temp_new_i32();
920 tcg_gen_op3_i32(INDEX_op_mul_i32
, t
, arg1
, arg2
);
921 tcg_gen_op3_i32(INDEX_op_muluh_i32
, rh
, arg1
, arg2
);
922 tcg_gen_mov_i32(rl
, t
);
923 tcg_temp_free_i32(t
);
925 TCGv_i64 t0
= tcg_temp_new_i64();
926 TCGv_i64 t1
= tcg_temp_new_i64();
927 tcg_gen_extu_i32_i64(t0
, arg1
);
928 tcg_gen_extu_i32_i64(t1
, arg2
);
929 tcg_gen_mul_i64(t0
, t0
, t1
);
930 tcg_gen_extr_i64_i32(rl
, rh
, t0
);
931 tcg_temp_free_i64(t0
);
932 tcg_temp_free_i64(t1
);
936 void tcg_gen_muls2_i32(TCGv_i32 rl
, TCGv_i32 rh
, TCGv_i32 arg1
, TCGv_i32 arg2
)
938 if (TCG_TARGET_HAS_muls2_i32
) {
939 tcg_gen_op4_i32(INDEX_op_muls2_i32
, rl
, rh
, arg1
, arg2
);
940 } else if (TCG_TARGET_HAS_mulsh_i32
) {
941 TCGv_i32 t
= tcg_temp_new_i32();
942 tcg_gen_op3_i32(INDEX_op_mul_i32
, t
, arg1
, arg2
);
943 tcg_gen_op3_i32(INDEX_op_mulsh_i32
, rh
, arg1
, arg2
);
944 tcg_gen_mov_i32(rl
, t
);
945 tcg_temp_free_i32(t
);
946 } else if (TCG_TARGET_REG_BITS
== 32) {
947 TCGv_i32 t0
= tcg_temp_new_i32();
948 TCGv_i32 t1
= tcg_temp_new_i32();
949 TCGv_i32 t2
= tcg_temp_new_i32();
950 TCGv_i32 t3
= tcg_temp_new_i32();
951 tcg_gen_mulu2_i32(t0
, t1
, arg1
, arg2
);
952 /* Adjust for negative inputs. */
953 tcg_gen_sari_i32(t2
, arg1
, 31);
954 tcg_gen_sari_i32(t3
, arg2
, 31);
955 tcg_gen_and_i32(t2
, t2
, arg2
);
956 tcg_gen_and_i32(t3
, t3
, arg1
);
957 tcg_gen_sub_i32(rh
, t1
, t2
);
958 tcg_gen_sub_i32(rh
, rh
, t3
);
959 tcg_gen_mov_i32(rl
, t0
);
960 tcg_temp_free_i32(t0
);
961 tcg_temp_free_i32(t1
);
962 tcg_temp_free_i32(t2
);
963 tcg_temp_free_i32(t3
);
965 TCGv_i64 t0
= tcg_temp_new_i64();
966 TCGv_i64 t1
= tcg_temp_new_i64();
967 tcg_gen_ext_i32_i64(t0
, arg1
);
968 tcg_gen_ext_i32_i64(t1
, arg2
);
969 tcg_gen_mul_i64(t0
, t0
, t1
);
970 tcg_gen_extr_i64_i32(rl
, rh
, t0
);
971 tcg_temp_free_i64(t0
);
972 tcg_temp_free_i64(t1
);
976 void tcg_gen_mulsu2_i32(TCGv_i32 rl
, TCGv_i32 rh
, TCGv_i32 arg1
, TCGv_i32 arg2
)
978 if (TCG_TARGET_REG_BITS
== 32) {
979 TCGv_i32 t0
= tcg_temp_new_i32();
980 TCGv_i32 t1
= tcg_temp_new_i32();
981 TCGv_i32 t2
= tcg_temp_new_i32();
982 tcg_gen_mulu2_i32(t0
, t1
, arg1
, arg2
);
983 /* Adjust for negative input for the signed arg1. */
984 tcg_gen_sari_i32(t2
, arg1
, 31);
985 tcg_gen_and_i32(t2
, t2
, arg2
);
986 tcg_gen_sub_i32(rh
, t1
, t2
);
987 tcg_gen_mov_i32(rl
, t0
);
988 tcg_temp_free_i32(t0
);
989 tcg_temp_free_i32(t1
);
990 tcg_temp_free_i32(t2
);
992 TCGv_i64 t0
= tcg_temp_new_i64();
993 TCGv_i64 t1
= tcg_temp_new_i64();
994 tcg_gen_ext_i32_i64(t0
, arg1
);
995 tcg_gen_extu_i32_i64(t1
, arg2
);
996 tcg_gen_mul_i64(t0
, t0
, t1
);
997 tcg_gen_extr_i64_i32(rl
, rh
, t0
);
998 tcg_temp_free_i64(t0
);
999 tcg_temp_free_i64(t1
);
1003 void tcg_gen_ext8s_i32(TCGv_i32 ret
, TCGv_i32 arg
)
1005 if (TCG_TARGET_HAS_ext8s_i32
) {
1006 tcg_gen_op2_i32(INDEX_op_ext8s_i32
, ret
, arg
);
1008 tcg_gen_shli_i32(ret
, arg
, 24);
1009 tcg_gen_sari_i32(ret
, ret
, 24);
1013 void tcg_gen_ext16s_i32(TCGv_i32 ret
, TCGv_i32 arg
)
1015 if (TCG_TARGET_HAS_ext16s_i32
) {
1016 tcg_gen_op2_i32(INDEX_op_ext16s_i32
, ret
, arg
);
1018 tcg_gen_shli_i32(ret
, arg
, 16);
1019 tcg_gen_sari_i32(ret
, ret
, 16);
1023 void tcg_gen_ext8u_i32(TCGv_i32 ret
, TCGv_i32 arg
)
1025 if (TCG_TARGET_HAS_ext8u_i32
) {
1026 tcg_gen_op2_i32(INDEX_op_ext8u_i32
, ret
, arg
);
1028 tcg_gen_andi_i32(ret
, arg
, 0xffu
);
1032 void tcg_gen_ext16u_i32(TCGv_i32 ret
, TCGv_i32 arg
)
1034 if (TCG_TARGET_HAS_ext16u_i32
) {
1035 tcg_gen_op2_i32(INDEX_op_ext16u_i32
, ret
, arg
);
1037 tcg_gen_andi_i32(ret
, arg
, 0xffffu
);
1041 /* Note: we assume the two high bytes are set to zero */
1042 void tcg_gen_bswap16_i32(TCGv_i32 ret
, TCGv_i32 arg
)
1044 if (TCG_TARGET_HAS_bswap16_i32
) {
1045 tcg_gen_op2_i32(INDEX_op_bswap16_i32
, ret
, arg
);
1047 TCGv_i32 t0
= tcg_temp_new_i32();
1049 tcg_gen_ext8u_i32(t0
, arg
);
1050 tcg_gen_shli_i32(t0
, t0
, 8);
1051 tcg_gen_shri_i32(ret
, arg
, 8);
1052 tcg_gen_or_i32(ret
, ret
, t0
);
1053 tcg_temp_free_i32(t0
);
1057 void tcg_gen_bswap32_i32(TCGv_i32 ret
, TCGv_i32 arg
)
1059 if (TCG_TARGET_HAS_bswap32_i32
) {
1060 tcg_gen_op2_i32(INDEX_op_bswap32_i32
, ret
, arg
);
1063 t0
= tcg_temp_new_i32();
1064 t1
= tcg_temp_new_i32();
1066 tcg_gen_shli_i32(t0
, arg
, 24);
1068 tcg_gen_andi_i32(t1
, arg
, 0x0000ff00);
1069 tcg_gen_shli_i32(t1
, t1
, 8);
1070 tcg_gen_or_i32(t0
, t0
, t1
);
1072 tcg_gen_shri_i32(t1
, arg
, 8);
1073 tcg_gen_andi_i32(t1
, t1
, 0x0000ff00);
1074 tcg_gen_or_i32(t0
, t0
, t1
);
1076 tcg_gen_shri_i32(t1
, arg
, 24);
1077 tcg_gen_or_i32(ret
, t0
, t1
);
1078 tcg_temp_free_i32(t0
);
1079 tcg_temp_free_i32(t1
);
1085 #if TCG_TARGET_REG_BITS == 32
1086 /* These are all inline for TCG_TARGET_REG_BITS == 64. */
1088 void tcg_gen_discard_i64(TCGv_i64 arg
)
1090 tcg_gen_discard_i32(TCGV_LOW(arg
));
1091 tcg_gen_discard_i32(TCGV_HIGH(arg
));
1094 void tcg_gen_mov_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1096 tcg_gen_mov_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
1097 tcg_gen_mov_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg
));
1100 void tcg_gen_movi_i64(TCGv_i64 ret
, int64_t arg
)
1102 tcg_gen_movi_i32(TCGV_LOW(ret
), arg
);
1103 tcg_gen_movi_i32(TCGV_HIGH(ret
), arg
>> 32);
1106 void tcg_gen_ld8u_i64(TCGv_i64 ret
, TCGv_ptr arg2
, tcg_target_long offset
)
1108 tcg_gen_ld8u_i32(TCGV_LOW(ret
), arg2
, offset
);
1109 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1112 void tcg_gen_ld8s_i64(TCGv_i64 ret
, TCGv_ptr arg2
, tcg_target_long offset
)
1114 tcg_gen_ld8s_i32(TCGV_LOW(ret
), arg2
, offset
);
1115 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_LOW(ret
), 31);
1118 void tcg_gen_ld16u_i64(TCGv_i64 ret
, TCGv_ptr arg2
, tcg_target_long offset
)
1120 tcg_gen_ld16u_i32(TCGV_LOW(ret
), arg2
, offset
);
1121 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1124 void tcg_gen_ld16s_i64(TCGv_i64 ret
, TCGv_ptr arg2
, tcg_target_long offset
)
1126 tcg_gen_ld16s_i32(TCGV_LOW(ret
), arg2
, offset
);
1127 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_LOW(ret
), 31);
1130 void tcg_gen_ld32u_i64(TCGv_i64 ret
, TCGv_ptr arg2
, tcg_target_long offset
)
1132 tcg_gen_ld_i32(TCGV_LOW(ret
), arg2
, offset
);
1133 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1136 void tcg_gen_ld32s_i64(TCGv_i64 ret
, TCGv_ptr arg2
, tcg_target_long offset
)
1138 tcg_gen_ld_i32(TCGV_LOW(ret
), arg2
, offset
);
1139 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_LOW(ret
), 31);
1142 void tcg_gen_ld_i64(TCGv_i64 ret
, TCGv_ptr arg2
, tcg_target_long offset
)
1144 /* Since arg2 and ret have different types,
1145 they cannot be the same temporary */
1146 #ifdef HOST_WORDS_BIGENDIAN
1147 tcg_gen_ld_i32(TCGV_HIGH(ret
), arg2
, offset
);
1148 tcg_gen_ld_i32(TCGV_LOW(ret
), arg2
, offset
+ 4);
1150 tcg_gen_ld_i32(TCGV_LOW(ret
), arg2
, offset
);
1151 tcg_gen_ld_i32(TCGV_HIGH(ret
), arg2
, offset
+ 4);
1155 void tcg_gen_st_i64(TCGv_i64 arg1
, TCGv_ptr arg2
, tcg_target_long offset
)
1157 #ifdef HOST_WORDS_BIGENDIAN
1158 tcg_gen_st_i32(TCGV_HIGH(arg1
), arg2
, offset
);
1159 tcg_gen_st_i32(TCGV_LOW(arg1
), arg2
, offset
+ 4);
1161 tcg_gen_st_i32(TCGV_LOW(arg1
), arg2
, offset
);
1162 tcg_gen_st_i32(TCGV_HIGH(arg1
), arg2
, offset
+ 4);
1166 void tcg_gen_and_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1168 tcg_gen_and_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), TCGV_LOW(arg2
));
1169 tcg_gen_and_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), TCGV_HIGH(arg2
));
1172 void tcg_gen_or_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1174 tcg_gen_or_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), TCGV_LOW(arg2
));
1175 tcg_gen_or_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), TCGV_HIGH(arg2
));
1178 void tcg_gen_xor_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1180 tcg_gen_xor_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), TCGV_LOW(arg2
));
1181 tcg_gen_xor_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), TCGV_HIGH(arg2
));
1184 void tcg_gen_shl_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1186 gen_helper_shl_i64(ret
, arg1
, arg2
);
1189 void tcg_gen_shr_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1191 gen_helper_shr_i64(ret
, arg1
, arg2
);
1194 void tcg_gen_sar_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1196 gen_helper_sar_i64(ret
, arg1
, arg2
);
1199 void tcg_gen_mul_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1204 t0
= tcg_temp_new_i64();
1205 t1
= tcg_temp_new_i32();
1207 tcg_gen_mulu2_i32(TCGV_LOW(t0
), TCGV_HIGH(t0
),
1208 TCGV_LOW(arg1
), TCGV_LOW(arg2
));
1210 tcg_gen_mul_i32(t1
, TCGV_LOW(arg1
), TCGV_HIGH(arg2
));
1211 tcg_gen_add_i32(TCGV_HIGH(t0
), TCGV_HIGH(t0
), t1
);
1212 tcg_gen_mul_i32(t1
, TCGV_HIGH(arg1
), TCGV_LOW(arg2
));
1213 tcg_gen_add_i32(TCGV_HIGH(t0
), TCGV_HIGH(t0
), t1
);
1215 tcg_gen_mov_i64(ret
, t0
);
1216 tcg_temp_free_i64(t0
);
1217 tcg_temp_free_i32(t1
);
1219 #endif /* TCG_TARGET_REG_SIZE == 32 */
1221 void tcg_gen_addi_i64(TCGv_i64 ret
, TCGv_i64 arg1
, int64_t arg2
)
1223 /* some cases can be optimized here */
1225 tcg_gen_mov_i64(ret
, arg1
);
1227 TCGv_i64 t0
= tcg_const_i64(arg2
);
1228 tcg_gen_add_i64(ret
, arg1
, t0
);
1229 tcg_temp_free_i64(t0
);
1233 void tcg_gen_subfi_i64(TCGv_i64 ret
, int64_t arg1
, TCGv_i64 arg2
)
1235 if (arg1
== 0 && TCG_TARGET_HAS_neg_i64
) {
1236 /* Don't recurse with tcg_gen_neg_i64. */
1237 tcg_gen_op2_i64(INDEX_op_neg_i64
, ret
, arg2
);
1239 TCGv_i64 t0
= tcg_const_i64(arg1
);
1240 tcg_gen_sub_i64(ret
, t0
, arg2
);
1241 tcg_temp_free_i64(t0
);
1245 void tcg_gen_subi_i64(TCGv_i64 ret
, TCGv_i64 arg1
, int64_t arg2
)
1247 /* some cases can be optimized here */
1249 tcg_gen_mov_i64(ret
, arg1
);
1251 TCGv_i64 t0
= tcg_const_i64(arg2
);
1252 tcg_gen_sub_i64(ret
, arg1
, t0
);
1253 tcg_temp_free_i64(t0
);
1257 void tcg_gen_andi_i64(TCGv_i64 ret
, TCGv_i64 arg1
, uint64_t arg2
)
1261 if (TCG_TARGET_REG_BITS
== 32) {
1262 tcg_gen_andi_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), arg2
);
1263 tcg_gen_andi_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), arg2
>> 32);
1267 /* Some cases can be optimized here. */
1270 tcg_gen_movi_i64(ret
, 0);
1272 case 0xffffffffffffffffull
:
1273 tcg_gen_mov_i64(ret
, arg1
);
1276 /* Don't recurse with tcg_gen_ext8u_i64. */
1277 if (TCG_TARGET_HAS_ext8u_i64
) {
1278 tcg_gen_op2_i64(INDEX_op_ext8u_i64
, ret
, arg1
);
1283 if (TCG_TARGET_HAS_ext16u_i64
) {
1284 tcg_gen_op2_i64(INDEX_op_ext16u_i64
, ret
, arg1
);
1289 if (TCG_TARGET_HAS_ext32u_i64
) {
1290 tcg_gen_op2_i64(INDEX_op_ext32u_i64
, ret
, arg1
);
1295 t0
= tcg_const_i64(arg2
);
1296 tcg_gen_and_i64(ret
, arg1
, t0
);
1297 tcg_temp_free_i64(t0
);
1300 void tcg_gen_ori_i64(TCGv_i64 ret
, TCGv_i64 arg1
, int64_t arg2
)
1302 if (TCG_TARGET_REG_BITS
== 32) {
1303 tcg_gen_ori_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), arg2
);
1304 tcg_gen_ori_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), arg2
>> 32);
1307 /* Some cases can be optimized here. */
1309 tcg_gen_movi_i64(ret
, -1);
1310 } else if (arg2
== 0) {
1311 tcg_gen_mov_i64(ret
, arg1
);
1313 TCGv_i64 t0
= tcg_const_i64(arg2
);
1314 tcg_gen_or_i64(ret
, arg1
, t0
);
1315 tcg_temp_free_i64(t0
);
1319 void tcg_gen_xori_i64(TCGv_i64 ret
, TCGv_i64 arg1
, int64_t arg2
)
1321 if (TCG_TARGET_REG_BITS
== 32) {
1322 tcg_gen_xori_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), arg2
);
1323 tcg_gen_xori_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), arg2
>> 32);
1326 /* Some cases can be optimized here. */
1328 tcg_gen_mov_i64(ret
, arg1
);
1329 } else if (arg2
== -1 && TCG_TARGET_HAS_not_i64
) {
1330 /* Don't recurse with tcg_gen_not_i64. */
1331 tcg_gen_op2_i64(INDEX_op_not_i64
, ret
, arg1
);
1333 TCGv_i64 t0
= tcg_const_i64(arg2
);
1334 tcg_gen_xor_i64(ret
, arg1
, t0
);
1335 tcg_temp_free_i64(t0
);
1339 static inline void tcg_gen_shifti_i64(TCGv_i64 ret
, TCGv_i64 arg1
,
1340 unsigned c
, bool right
, bool arith
)
1342 tcg_debug_assert(c
< 64);
1344 tcg_gen_mov_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
));
1345 tcg_gen_mov_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
));
1346 } else if (c
>= 32) {
1350 tcg_gen_sari_i32(TCGV_LOW(ret
), TCGV_HIGH(arg1
), c
);
1351 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), 31);
1353 tcg_gen_shri_i32(TCGV_LOW(ret
), TCGV_HIGH(arg1
), c
);
1354 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1357 tcg_gen_shli_i32(TCGV_HIGH(ret
), TCGV_LOW(arg1
), c
);
1358 tcg_gen_movi_i32(TCGV_LOW(ret
), 0);
1363 t0
= tcg_temp_new_i32();
1364 t1
= tcg_temp_new_i32();
1366 tcg_gen_shli_i32(t0
, TCGV_HIGH(arg1
), 32 - c
);
1368 tcg_gen_sari_i32(t1
, TCGV_HIGH(arg1
), c
);
1370 tcg_gen_shri_i32(t1
, TCGV_HIGH(arg1
), c
);
1372 tcg_gen_shri_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), c
);
1373 tcg_gen_or_i32(TCGV_LOW(ret
), TCGV_LOW(ret
), t0
);
1374 tcg_gen_mov_i32(TCGV_HIGH(ret
), t1
);
1376 tcg_gen_shri_i32(t0
, TCGV_LOW(arg1
), 32 - c
);
1377 /* Note: ret can be the same as arg1, so we use t1 */
1378 tcg_gen_shli_i32(t1
, TCGV_LOW(arg1
), c
);
1379 tcg_gen_shli_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), c
);
1380 tcg_gen_or_i32(TCGV_HIGH(ret
), TCGV_HIGH(ret
), t0
);
1381 tcg_gen_mov_i32(TCGV_LOW(ret
), t1
);
1383 tcg_temp_free_i32(t0
);
1384 tcg_temp_free_i32(t1
);
1388 void tcg_gen_shli_i64(TCGv_i64 ret
, TCGv_i64 arg1
, unsigned arg2
)
1390 tcg_debug_assert(arg2
< 64);
1391 if (TCG_TARGET_REG_BITS
== 32) {
1392 tcg_gen_shifti_i64(ret
, arg1
, arg2
, 0, 0);
1393 } else if (arg2
== 0) {
1394 tcg_gen_mov_i64(ret
, arg1
);
1396 TCGv_i64 t0
= tcg_const_i64(arg2
);
1397 tcg_gen_shl_i64(ret
, arg1
, t0
);
1398 tcg_temp_free_i64(t0
);
1402 void tcg_gen_shri_i64(TCGv_i64 ret
, TCGv_i64 arg1
, unsigned arg2
)
1404 tcg_debug_assert(arg2
< 64);
1405 if (TCG_TARGET_REG_BITS
== 32) {
1406 tcg_gen_shifti_i64(ret
, arg1
, arg2
, 1, 0);
1407 } else if (arg2
== 0) {
1408 tcg_gen_mov_i64(ret
, arg1
);
1410 TCGv_i64 t0
= tcg_const_i64(arg2
);
1411 tcg_gen_shr_i64(ret
, arg1
, t0
);
1412 tcg_temp_free_i64(t0
);
1416 void tcg_gen_sari_i64(TCGv_i64 ret
, TCGv_i64 arg1
, unsigned arg2
)
1418 tcg_debug_assert(arg2
< 64);
1419 if (TCG_TARGET_REG_BITS
== 32) {
1420 tcg_gen_shifti_i64(ret
, arg1
, arg2
, 1, 1);
1421 } else if (arg2
== 0) {
1422 tcg_gen_mov_i64(ret
, arg1
);
1424 TCGv_i64 t0
= tcg_const_i64(arg2
);
1425 tcg_gen_sar_i64(ret
, arg1
, t0
);
1426 tcg_temp_free_i64(t0
);
1430 void tcg_gen_brcond_i64(TCGCond cond
, TCGv_i64 arg1
, TCGv_i64 arg2
, TCGLabel
*l
)
1432 if (cond
== TCG_COND_ALWAYS
) {
1434 } else if (cond
!= TCG_COND_NEVER
) {
1435 if (TCG_TARGET_REG_BITS
== 32) {
1436 tcg_gen_op6ii_i32(INDEX_op_brcond2_i32
, TCGV_LOW(arg1
),
1437 TCGV_HIGH(arg1
), TCGV_LOW(arg2
),
1438 TCGV_HIGH(arg2
), cond
, label_arg(l
));
1440 tcg_gen_op4ii_i64(INDEX_op_brcond_i64
, arg1
, arg2
, cond
,
1446 void tcg_gen_brcondi_i64(TCGCond cond
, TCGv_i64 arg1
, int64_t arg2
, TCGLabel
*l
)
1448 if (cond
== TCG_COND_ALWAYS
) {
1450 } else if (cond
!= TCG_COND_NEVER
) {
1451 TCGv_i64 t0
= tcg_const_i64(arg2
);
1452 tcg_gen_brcond_i64(cond
, arg1
, t0
, l
);
1453 tcg_temp_free_i64(t0
);
1457 void tcg_gen_setcond_i64(TCGCond cond
, TCGv_i64 ret
,
1458 TCGv_i64 arg1
, TCGv_i64 arg2
)
1460 if (cond
== TCG_COND_ALWAYS
) {
1461 tcg_gen_movi_i64(ret
, 1);
1462 } else if (cond
== TCG_COND_NEVER
) {
1463 tcg_gen_movi_i64(ret
, 0);
1465 if (TCG_TARGET_REG_BITS
== 32) {
1466 tcg_gen_op6i_i32(INDEX_op_setcond2_i32
, TCGV_LOW(ret
),
1467 TCGV_LOW(arg1
), TCGV_HIGH(arg1
),
1468 TCGV_LOW(arg2
), TCGV_HIGH(arg2
), cond
);
1469 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1471 tcg_gen_op4i_i64(INDEX_op_setcond_i64
, ret
, arg1
, arg2
, cond
);
1476 void tcg_gen_setcondi_i64(TCGCond cond
, TCGv_i64 ret
,
1477 TCGv_i64 arg1
, int64_t arg2
)
1479 TCGv_i64 t0
= tcg_const_i64(arg2
);
1480 tcg_gen_setcond_i64(cond
, ret
, arg1
, t0
);
1481 tcg_temp_free_i64(t0
);
1484 void tcg_gen_muli_i64(TCGv_i64 ret
, TCGv_i64 arg1
, int64_t arg2
)
1486 TCGv_i64 t0
= tcg_const_i64(arg2
);
1487 tcg_gen_mul_i64(ret
, arg1
, t0
);
1488 tcg_temp_free_i64(t0
);
1491 void tcg_gen_div_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1493 if (TCG_TARGET_HAS_div_i64
) {
1494 tcg_gen_op3_i64(INDEX_op_div_i64
, ret
, arg1
, arg2
);
1495 } else if (TCG_TARGET_HAS_div2_i64
) {
1496 TCGv_i64 t0
= tcg_temp_new_i64();
1497 tcg_gen_sari_i64(t0
, arg1
, 63);
1498 tcg_gen_op5_i64(INDEX_op_div2_i64
, ret
, t0
, arg1
, t0
, arg2
);
1499 tcg_temp_free_i64(t0
);
1501 gen_helper_div_i64(ret
, arg1
, arg2
);
1505 void tcg_gen_rem_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1507 if (TCG_TARGET_HAS_rem_i64
) {
1508 tcg_gen_op3_i64(INDEX_op_rem_i64
, ret
, arg1
, arg2
);
1509 } else if (TCG_TARGET_HAS_div_i64
) {
1510 TCGv_i64 t0
= tcg_temp_new_i64();
1511 tcg_gen_op3_i64(INDEX_op_div_i64
, t0
, arg1
, arg2
);
1512 tcg_gen_mul_i64(t0
, t0
, arg2
);
1513 tcg_gen_sub_i64(ret
, arg1
, t0
);
1514 tcg_temp_free_i64(t0
);
1515 } else if (TCG_TARGET_HAS_div2_i64
) {
1516 TCGv_i64 t0
= tcg_temp_new_i64();
1517 tcg_gen_sari_i64(t0
, arg1
, 63);
1518 tcg_gen_op5_i64(INDEX_op_div2_i64
, t0
, ret
, arg1
, t0
, arg2
);
1519 tcg_temp_free_i64(t0
);
1521 gen_helper_rem_i64(ret
, arg1
, arg2
);
1525 void tcg_gen_divu_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1527 if (TCG_TARGET_HAS_div_i64
) {
1528 tcg_gen_op3_i64(INDEX_op_divu_i64
, ret
, arg1
, arg2
);
1529 } else if (TCG_TARGET_HAS_div2_i64
) {
1530 TCGv_i64 t0
= tcg_temp_new_i64();
1531 tcg_gen_movi_i64(t0
, 0);
1532 tcg_gen_op5_i64(INDEX_op_divu2_i64
, ret
, t0
, arg1
, t0
, arg2
);
1533 tcg_temp_free_i64(t0
);
1535 gen_helper_divu_i64(ret
, arg1
, arg2
);
1539 void tcg_gen_remu_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1541 if (TCG_TARGET_HAS_rem_i64
) {
1542 tcg_gen_op3_i64(INDEX_op_remu_i64
, ret
, arg1
, arg2
);
1543 } else if (TCG_TARGET_HAS_div_i64
) {
1544 TCGv_i64 t0
= tcg_temp_new_i64();
1545 tcg_gen_op3_i64(INDEX_op_divu_i64
, t0
, arg1
, arg2
);
1546 tcg_gen_mul_i64(t0
, t0
, arg2
);
1547 tcg_gen_sub_i64(ret
, arg1
, t0
);
1548 tcg_temp_free_i64(t0
);
1549 } else if (TCG_TARGET_HAS_div2_i64
) {
1550 TCGv_i64 t0
= tcg_temp_new_i64();
1551 tcg_gen_movi_i64(t0
, 0);
1552 tcg_gen_op5_i64(INDEX_op_divu2_i64
, t0
, ret
, arg1
, t0
, arg2
);
1553 tcg_temp_free_i64(t0
);
1555 gen_helper_remu_i64(ret
, arg1
, arg2
);
1559 void tcg_gen_ext8s_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1561 if (TCG_TARGET_REG_BITS
== 32) {
1562 tcg_gen_ext8s_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
1563 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_LOW(ret
), 31);
1564 } else if (TCG_TARGET_HAS_ext8s_i64
) {
1565 tcg_gen_op2_i64(INDEX_op_ext8s_i64
, ret
, arg
);
1567 tcg_gen_shli_i64(ret
, arg
, 56);
1568 tcg_gen_sari_i64(ret
, ret
, 56);
1572 void tcg_gen_ext16s_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1574 if (TCG_TARGET_REG_BITS
== 32) {
1575 tcg_gen_ext16s_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
1576 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_LOW(ret
), 31);
1577 } else if (TCG_TARGET_HAS_ext16s_i64
) {
1578 tcg_gen_op2_i64(INDEX_op_ext16s_i64
, ret
, arg
);
1580 tcg_gen_shli_i64(ret
, arg
, 48);
1581 tcg_gen_sari_i64(ret
, ret
, 48);
1585 void tcg_gen_ext32s_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1587 if (TCG_TARGET_REG_BITS
== 32) {
1588 tcg_gen_mov_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
1589 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_LOW(ret
), 31);
1590 } else if (TCG_TARGET_HAS_ext32s_i64
) {
1591 tcg_gen_op2_i64(INDEX_op_ext32s_i64
, ret
, arg
);
1593 tcg_gen_shli_i64(ret
, arg
, 32);
1594 tcg_gen_sari_i64(ret
, ret
, 32);
1598 void tcg_gen_ext8u_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1600 if (TCG_TARGET_REG_BITS
== 32) {
1601 tcg_gen_ext8u_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
1602 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1603 } else if (TCG_TARGET_HAS_ext8u_i64
) {
1604 tcg_gen_op2_i64(INDEX_op_ext8u_i64
, ret
, arg
);
1606 tcg_gen_andi_i64(ret
, arg
, 0xffu
);
1610 void tcg_gen_ext16u_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1612 if (TCG_TARGET_REG_BITS
== 32) {
1613 tcg_gen_ext16u_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
1614 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1615 } else if (TCG_TARGET_HAS_ext16u_i64
) {
1616 tcg_gen_op2_i64(INDEX_op_ext16u_i64
, ret
, arg
);
1618 tcg_gen_andi_i64(ret
, arg
, 0xffffu
);
1622 void tcg_gen_ext32u_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1624 if (TCG_TARGET_REG_BITS
== 32) {
1625 tcg_gen_mov_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
1626 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1627 } else if (TCG_TARGET_HAS_ext32u_i64
) {
1628 tcg_gen_op2_i64(INDEX_op_ext32u_i64
, ret
, arg
);
1630 tcg_gen_andi_i64(ret
, arg
, 0xffffffffu
);
1634 /* Note: we assume the six high bytes are set to zero */
1635 void tcg_gen_bswap16_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1637 if (TCG_TARGET_REG_BITS
== 32) {
1638 tcg_gen_bswap16_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
1639 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1640 } else if (TCG_TARGET_HAS_bswap16_i64
) {
1641 tcg_gen_op2_i64(INDEX_op_bswap16_i64
, ret
, arg
);
1643 TCGv_i64 t0
= tcg_temp_new_i64();
1645 tcg_gen_ext8u_i64(t0
, arg
);
1646 tcg_gen_shli_i64(t0
, t0
, 8);
1647 tcg_gen_shri_i64(ret
, arg
, 8);
1648 tcg_gen_or_i64(ret
, ret
, t0
);
1649 tcg_temp_free_i64(t0
);
1653 /* Note: we assume the four high bytes are set to zero */
1654 void tcg_gen_bswap32_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1656 if (TCG_TARGET_REG_BITS
== 32) {
1657 tcg_gen_bswap32_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
1658 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1659 } else if (TCG_TARGET_HAS_bswap32_i64
) {
1660 tcg_gen_op2_i64(INDEX_op_bswap32_i64
, ret
, arg
);
1663 t0
= tcg_temp_new_i64();
1664 t1
= tcg_temp_new_i64();
1666 tcg_gen_shli_i64(t0
, arg
, 24);
1667 tcg_gen_ext32u_i64(t0
, t0
);
1669 tcg_gen_andi_i64(t1
, arg
, 0x0000ff00);
1670 tcg_gen_shli_i64(t1
, t1
, 8);
1671 tcg_gen_or_i64(t0
, t0
, t1
);
1673 tcg_gen_shri_i64(t1
, arg
, 8);
1674 tcg_gen_andi_i64(t1
, t1
, 0x0000ff00);
1675 tcg_gen_or_i64(t0
, t0
, t1
);
1677 tcg_gen_shri_i64(t1
, arg
, 24);
1678 tcg_gen_or_i64(ret
, t0
, t1
);
1679 tcg_temp_free_i64(t0
);
1680 tcg_temp_free_i64(t1
);
1684 void tcg_gen_bswap64_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1686 if (TCG_TARGET_REG_BITS
== 32) {
1688 t0
= tcg_temp_new_i32();
1689 t1
= tcg_temp_new_i32();
1691 tcg_gen_bswap32_i32(t0
, TCGV_LOW(arg
));
1692 tcg_gen_bswap32_i32(t1
, TCGV_HIGH(arg
));
1693 tcg_gen_mov_i32(TCGV_LOW(ret
), t1
);
1694 tcg_gen_mov_i32(TCGV_HIGH(ret
), t0
);
1695 tcg_temp_free_i32(t0
);
1696 tcg_temp_free_i32(t1
);
1697 } else if (TCG_TARGET_HAS_bswap64_i64
) {
1698 tcg_gen_op2_i64(INDEX_op_bswap64_i64
, ret
, arg
);
1700 TCGv_i64 t0
= tcg_temp_new_i64();
1701 TCGv_i64 t1
= tcg_temp_new_i64();
1703 tcg_gen_shli_i64(t0
, arg
, 56);
1705 tcg_gen_andi_i64(t1
, arg
, 0x0000ff00);
1706 tcg_gen_shli_i64(t1
, t1
, 40);
1707 tcg_gen_or_i64(t0
, t0
, t1
);
1709 tcg_gen_andi_i64(t1
, arg
, 0x00ff0000);
1710 tcg_gen_shli_i64(t1
, t1
, 24);
1711 tcg_gen_or_i64(t0
, t0
, t1
);
1713 tcg_gen_andi_i64(t1
, arg
, 0xff000000);
1714 tcg_gen_shli_i64(t1
, t1
, 8);
1715 tcg_gen_or_i64(t0
, t0
, t1
);
1717 tcg_gen_shri_i64(t1
, arg
, 8);
1718 tcg_gen_andi_i64(t1
, t1
, 0xff000000);
1719 tcg_gen_or_i64(t0
, t0
, t1
);
1721 tcg_gen_shri_i64(t1
, arg
, 24);
1722 tcg_gen_andi_i64(t1
, t1
, 0x00ff0000);
1723 tcg_gen_or_i64(t0
, t0
, t1
);
1725 tcg_gen_shri_i64(t1
, arg
, 40);
1726 tcg_gen_andi_i64(t1
, t1
, 0x0000ff00);
1727 tcg_gen_or_i64(t0
, t0
, t1
);
1729 tcg_gen_shri_i64(t1
, arg
, 56);
1730 tcg_gen_or_i64(ret
, t0
, t1
);
1731 tcg_temp_free_i64(t0
);
1732 tcg_temp_free_i64(t1
);
1736 void tcg_gen_not_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1738 if (TCG_TARGET_REG_BITS
== 32) {
1739 tcg_gen_not_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
1740 tcg_gen_not_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg
));
1741 } else if (TCG_TARGET_HAS_not_i64
) {
1742 tcg_gen_op2_i64(INDEX_op_not_i64
, ret
, arg
);
1744 tcg_gen_xori_i64(ret
, arg
, -1);
1748 void tcg_gen_andc_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1750 if (TCG_TARGET_REG_BITS
== 32) {
1751 tcg_gen_andc_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), TCGV_LOW(arg2
));
1752 tcg_gen_andc_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), TCGV_HIGH(arg2
));
1753 } else if (TCG_TARGET_HAS_andc_i64
) {
1754 tcg_gen_op3_i64(INDEX_op_andc_i64
, ret
, arg1
, arg2
);
1756 TCGv_i64 t0
= tcg_temp_new_i64();
1757 tcg_gen_not_i64(t0
, arg2
);
1758 tcg_gen_and_i64(ret
, arg1
, t0
);
1759 tcg_temp_free_i64(t0
);
1763 void tcg_gen_eqv_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1765 if (TCG_TARGET_REG_BITS
== 32) {
1766 tcg_gen_eqv_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), TCGV_LOW(arg2
));
1767 tcg_gen_eqv_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), TCGV_HIGH(arg2
));
1768 } else if (TCG_TARGET_HAS_eqv_i64
) {
1769 tcg_gen_op3_i64(INDEX_op_eqv_i64
, ret
, arg1
, arg2
);
1771 tcg_gen_xor_i64(ret
, arg1
, arg2
);
1772 tcg_gen_not_i64(ret
, ret
);
1776 void tcg_gen_nand_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1778 if (TCG_TARGET_REG_BITS
== 32) {
1779 tcg_gen_nand_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), TCGV_LOW(arg2
));
1780 tcg_gen_nand_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), TCGV_HIGH(arg2
));
1781 } else if (TCG_TARGET_HAS_nand_i64
) {
1782 tcg_gen_op3_i64(INDEX_op_nand_i64
, ret
, arg1
, arg2
);
1784 tcg_gen_and_i64(ret
, arg1
, arg2
);
1785 tcg_gen_not_i64(ret
, ret
);
1789 void tcg_gen_nor_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1791 if (TCG_TARGET_REG_BITS
== 32) {
1792 tcg_gen_nor_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), TCGV_LOW(arg2
));
1793 tcg_gen_nor_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), TCGV_HIGH(arg2
));
1794 } else if (TCG_TARGET_HAS_nor_i64
) {
1795 tcg_gen_op3_i64(INDEX_op_nor_i64
, ret
, arg1
, arg2
);
1797 tcg_gen_or_i64(ret
, arg1
, arg2
);
1798 tcg_gen_not_i64(ret
, ret
);
1802 void tcg_gen_orc_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1804 if (TCG_TARGET_REG_BITS
== 32) {
1805 tcg_gen_orc_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), TCGV_LOW(arg2
));
1806 tcg_gen_orc_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
), TCGV_HIGH(arg2
));
1807 } else if (TCG_TARGET_HAS_orc_i64
) {
1808 tcg_gen_op3_i64(INDEX_op_orc_i64
, ret
, arg1
, arg2
);
1810 TCGv_i64 t0
= tcg_temp_new_i64();
1811 tcg_gen_not_i64(t0
, arg2
);
1812 tcg_gen_or_i64(ret
, arg1
, t0
);
1813 tcg_temp_free_i64(t0
);
1817 void tcg_gen_clz_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1819 if (TCG_TARGET_HAS_clz_i64
) {
1820 tcg_gen_op3_i64(INDEX_op_clz_i64
, ret
, arg1
, arg2
);
1822 gen_helper_clz_i64(ret
, arg1
, arg2
);
1826 void tcg_gen_clzi_i64(TCGv_i64 ret
, TCGv_i64 arg1
, uint64_t arg2
)
1828 if (TCG_TARGET_REG_BITS
== 32
1829 && TCG_TARGET_HAS_clz_i32
1830 && arg2
<= 0xffffffffu
) {
1831 TCGv_i32 t
= tcg_const_i32((uint32_t)arg2
- 32);
1832 tcg_gen_clz_i32(t
, TCGV_LOW(arg1
), t
);
1833 tcg_gen_addi_i32(t
, t
, 32);
1834 tcg_gen_clz_i32(TCGV_LOW(ret
), TCGV_HIGH(arg1
), t
);
1835 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1836 tcg_temp_free_i32(t
);
1838 TCGv_i64 t
= tcg_const_i64(arg2
);
1839 tcg_gen_clz_i64(ret
, arg1
, t
);
1840 tcg_temp_free_i64(t
);
1844 void tcg_gen_ctz_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1846 if (TCG_TARGET_HAS_ctz_i64
) {
1847 tcg_gen_op3_i64(INDEX_op_ctz_i64
, ret
, arg1
, arg2
);
1848 } else if (TCG_TARGET_HAS_ctpop_i64
|| TCG_TARGET_HAS_clz_i64
) {
1849 TCGv_i64 z
, t
= tcg_temp_new_i64();
1851 if (TCG_TARGET_HAS_ctpop_i64
) {
1852 tcg_gen_subi_i64(t
, arg1
, 1);
1853 tcg_gen_andc_i64(t
, t
, arg1
);
1854 tcg_gen_ctpop_i64(t
, t
);
1856 /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */
1857 tcg_gen_neg_i64(t
, arg1
);
1858 tcg_gen_and_i64(t
, t
, arg1
);
1859 tcg_gen_clzi_i64(t
, t
, 64);
1860 tcg_gen_xori_i64(t
, t
, 63);
1862 z
= tcg_const_i64(0);
1863 tcg_gen_movcond_i64(TCG_COND_EQ
, ret
, arg1
, z
, arg2
, t
);
1864 tcg_temp_free_i64(t
);
1865 tcg_temp_free_i64(z
);
1867 gen_helper_ctz_i64(ret
, arg1
, arg2
);
1871 void tcg_gen_ctzi_i64(TCGv_i64 ret
, TCGv_i64 arg1
, uint64_t arg2
)
1873 if (TCG_TARGET_REG_BITS
== 32
1874 && TCG_TARGET_HAS_ctz_i32
1875 && arg2
<= 0xffffffffu
) {
1876 TCGv_i32 t32
= tcg_const_i32((uint32_t)arg2
- 32);
1877 tcg_gen_ctz_i32(t32
, TCGV_HIGH(arg1
), t32
);
1878 tcg_gen_addi_i32(t32
, t32
, 32);
1879 tcg_gen_ctz_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
), t32
);
1880 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1881 tcg_temp_free_i32(t32
);
1882 } else if (!TCG_TARGET_HAS_ctz_i64
1883 && TCG_TARGET_HAS_ctpop_i64
1885 /* This equivalence has the advantage of not requiring a fixup. */
1886 TCGv_i64 t
= tcg_temp_new_i64();
1887 tcg_gen_subi_i64(t
, arg1
, 1);
1888 tcg_gen_andc_i64(t
, t
, arg1
);
1889 tcg_gen_ctpop_i64(ret
, t
);
1890 tcg_temp_free_i64(t
);
1892 TCGv_i64 t64
= tcg_const_i64(arg2
);
1893 tcg_gen_ctz_i64(ret
, arg1
, t64
);
1894 tcg_temp_free_i64(t64
);
1898 void tcg_gen_clrsb_i64(TCGv_i64 ret
, TCGv_i64 arg
)
1900 if (TCG_TARGET_HAS_clz_i64
|| TCG_TARGET_HAS_clz_i32
) {
1901 TCGv_i64 t
= tcg_temp_new_i64();
1902 tcg_gen_sari_i64(t
, arg
, 63);
1903 tcg_gen_xor_i64(t
, t
, arg
);
1904 tcg_gen_clzi_i64(t
, t
, 64);
1905 tcg_gen_subi_i64(ret
, t
, 1);
1906 tcg_temp_free_i64(t
);
1908 gen_helper_clrsb_i64(ret
, arg
);
1912 void tcg_gen_ctpop_i64(TCGv_i64 ret
, TCGv_i64 arg1
)
1914 if (TCG_TARGET_HAS_ctpop_i64
) {
1915 tcg_gen_op2_i64(INDEX_op_ctpop_i64
, ret
, arg1
);
1916 } else if (TCG_TARGET_REG_BITS
== 32 && TCG_TARGET_HAS_ctpop_i32
) {
1917 tcg_gen_ctpop_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
));
1918 tcg_gen_ctpop_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
));
1919 tcg_gen_add_i32(TCGV_LOW(ret
), TCGV_LOW(ret
), TCGV_HIGH(ret
));
1920 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
1922 gen_helper_ctpop_i64(ret
, arg1
);
1926 void tcg_gen_rotl_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1928 if (TCG_TARGET_HAS_rot_i64
) {
1929 tcg_gen_op3_i64(INDEX_op_rotl_i64
, ret
, arg1
, arg2
);
1932 t0
= tcg_temp_new_i64();
1933 t1
= tcg_temp_new_i64();
1934 tcg_gen_shl_i64(t0
, arg1
, arg2
);
1935 tcg_gen_subfi_i64(t1
, 64, arg2
);
1936 tcg_gen_shr_i64(t1
, arg1
, t1
);
1937 tcg_gen_or_i64(ret
, t0
, t1
);
1938 tcg_temp_free_i64(t0
);
1939 tcg_temp_free_i64(t1
);
1943 void tcg_gen_rotli_i64(TCGv_i64 ret
, TCGv_i64 arg1
, unsigned arg2
)
1945 tcg_debug_assert(arg2
< 64);
1946 /* some cases can be optimized here */
1948 tcg_gen_mov_i64(ret
, arg1
);
1949 } else if (TCG_TARGET_HAS_rot_i64
) {
1950 TCGv_i64 t0
= tcg_const_i64(arg2
);
1951 tcg_gen_rotl_i64(ret
, arg1
, t0
);
1952 tcg_temp_free_i64(t0
);
1955 t0
= tcg_temp_new_i64();
1956 t1
= tcg_temp_new_i64();
1957 tcg_gen_shli_i64(t0
, arg1
, arg2
);
1958 tcg_gen_shri_i64(t1
, arg1
, 64 - arg2
);
1959 tcg_gen_or_i64(ret
, t0
, t1
);
1960 tcg_temp_free_i64(t0
);
1961 tcg_temp_free_i64(t1
);
1965 void tcg_gen_rotr_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
)
1967 if (TCG_TARGET_HAS_rot_i64
) {
1968 tcg_gen_op3_i64(INDEX_op_rotr_i64
, ret
, arg1
, arg2
);
1971 t0
= tcg_temp_new_i64();
1972 t1
= tcg_temp_new_i64();
1973 tcg_gen_shr_i64(t0
, arg1
, arg2
);
1974 tcg_gen_subfi_i64(t1
, 64, arg2
);
1975 tcg_gen_shl_i64(t1
, arg1
, t1
);
1976 tcg_gen_or_i64(ret
, t0
, t1
);
1977 tcg_temp_free_i64(t0
);
1978 tcg_temp_free_i64(t1
);
1982 void tcg_gen_rotri_i64(TCGv_i64 ret
, TCGv_i64 arg1
, unsigned arg2
)
1984 tcg_debug_assert(arg2
< 64);
1985 /* some cases can be optimized here */
1987 tcg_gen_mov_i64(ret
, arg1
);
1989 tcg_gen_rotli_i64(ret
, arg1
, 64 - arg2
);
1993 void tcg_gen_deposit_i64(TCGv_i64 ret
, TCGv_i64 arg1
, TCGv_i64 arg2
,
1994 unsigned int ofs
, unsigned int len
)
1999 tcg_debug_assert(ofs
< 64);
2000 tcg_debug_assert(len
> 0);
2001 tcg_debug_assert(len
<= 64);
2002 tcg_debug_assert(ofs
+ len
<= 64);
2005 tcg_gen_mov_i64(ret
, arg2
);
2008 if (TCG_TARGET_HAS_deposit_i64
&& TCG_TARGET_deposit_i64_valid(ofs
, len
)) {
2009 tcg_gen_op5ii_i64(INDEX_op_deposit_i64
, ret
, arg1
, arg2
, ofs
, len
);
2013 if (TCG_TARGET_REG_BITS
== 32) {
2015 tcg_gen_deposit_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
),
2016 TCGV_LOW(arg2
), ofs
- 32, len
);
2017 tcg_gen_mov_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
));
2020 if (ofs
+ len
<= 32) {
2021 tcg_gen_deposit_i32(TCGV_LOW(ret
), TCGV_LOW(arg1
),
2022 TCGV_LOW(arg2
), ofs
, len
);
2023 tcg_gen_mov_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg1
));
2028 mask
= (1ull << len
) - 1;
2029 t1
= tcg_temp_new_i64();
2031 if (ofs
+ len
< 64) {
2032 tcg_gen_andi_i64(t1
, arg2
, mask
);
2033 tcg_gen_shli_i64(t1
, t1
, ofs
);
2035 tcg_gen_shli_i64(t1
, arg2
, ofs
);
2037 tcg_gen_andi_i64(ret
, arg1
, ~(mask
<< ofs
));
2038 tcg_gen_or_i64(ret
, ret
, t1
);
2040 tcg_temp_free_i64(t1
);
2043 void tcg_gen_deposit_z_i64(TCGv_i64 ret
, TCGv_i64 arg
,
2044 unsigned int ofs
, unsigned int len
)
2046 tcg_debug_assert(ofs
< 64);
2047 tcg_debug_assert(len
> 0);
2048 tcg_debug_assert(len
<= 64);
2049 tcg_debug_assert(ofs
+ len
<= 64);
2051 if (ofs
+ len
== 64) {
2052 tcg_gen_shli_i64(ret
, arg
, ofs
);
2053 } else if (ofs
== 0) {
2054 tcg_gen_andi_i64(ret
, arg
, (1ull << len
) - 1);
2055 } else if (TCG_TARGET_HAS_deposit_i64
2056 && TCG_TARGET_deposit_i64_valid(ofs
, len
)) {
2057 TCGv_i64 zero
= tcg_const_i64(0);
2058 tcg_gen_op5ii_i64(INDEX_op_deposit_i64
, ret
, zero
, arg
, ofs
, len
);
2059 tcg_temp_free_i64(zero
);
2061 if (TCG_TARGET_REG_BITS
== 32) {
2063 tcg_gen_deposit_z_i32(TCGV_HIGH(ret
), TCGV_LOW(arg
),
2065 tcg_gen_movi_i32(TCGV_LOW(ret
), 0);
2068 if (ofs
+ len
<= 32) {
2069 tcg_gen_deposit_z_i32(TCGV_LOW(ret
), TCGV_LOW(arg
), ofs
, len
);
2070 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
2074 /* To help two-operand hosts we prefer to zero-extend first,
2075 which allows ARG to stay live. */
2078 if (TCG_TARGET_HAS_ext32u_i64
) {
2079 tcg_gen_ext32u_i64(ret
, arg
);
2080 tcg_gen_shli_i64(ret
, ret
, ofs
);
2085 if (TCG_TARGET_HAS_ext16u_i64
) {
2086 tcg_gen_ext16u_i64(ret
, arg
);
2087 tcg_gen_shli_i64(ret
, ret
, ofs
);
2092 if (TCG_TARGET_HAS_ext8u_i64
) {
2093 tcg_gen_ext8u_i64(ret
, arg
);
2094 tcg_gen_shli_i64(ret
, ret
, ofs
);
2099 /* Otherwise prefer zero-extension over AND for code size. */
2100 switch (ofs
+ len
) {
2102 if (TCG_TARGET_HAS_ext32u_i64
) {
2103 tcg_gen_shli_i64(ret
, arg
, ofs
);
2104 tcg_gen_ext32u_i64(ret
, ret
);
2109 if (TCG_TARGET_HAS_ext16u_i64
) {
2110 tcg_gen_shli_i64(ret
, arg
, ofs
);
2111 tcg_gen_ext16u_i64(ret
, ret
);
2116 if (TCG_TARGET_HAS_ext8u_i64
) {
2117 tcg_gen_shli_i64(ret
, arg
, ofs
);
2118 tcg_gen_ext8u_i64(ret
, ret
);
2123 tcg_gen_andi_i64(ret
, arg
, (1ull << len
) - 1);
2124 tcg_gen_shli_i64(ret
, ret
, ofs
);
2128 void tcg_gen_extract_i64(TCGv_i64 ret
, TCGv_i64 arg
,
2129 unsigned int ofs
, unsigned int len
)
2131 tcg_debug_assert(ofs
< 64);
2132 tcg_debug_assert(len
> 0);
2133 tcg_debug_assert(len
<= 64);
2134 tcg_debug_assert(ofs
+ len
<= 64);
2136 /* Canonicalize certain special cases, even if extract is supported. */
2137 if (ofs
+ len
== 64) {
2138 tcg_gen_shri_i64(ret
, arg
, 64 - len
);
2142 tcg_gen_andi_i64(ret
, arg
, (1ull << len
) - 1);
2146 if (TCG_TARGET_REG_BITS
== 32) {
2147 /* Look for a 32-bit extract within one of the two words. */
2149 tcg_gen_extract_i32(TCGV_LOW(ret
), TCGV_HIGH(arg
), ofs
- 32, len
);
2150 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
2153 if (ofs
+ len
<= 32) {
2154 tcg_gen_extract_i32(TCGV_LOW(ret
), TCGV_LOW(arg
), ofs
, len
);
2155 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
2158 /* The field is split across two words. One double-word
2159 shift is better than two double-word shifts. */
2163 if (TCG_TARGET_HAS_extract_i64
2164 && TCG_TARGET_extract_i64_valid(ofs
, len
)) {
2165 tcg_gen_op4ii_i64(INDEX_op_extract_i64
, ret
, arg
, ofs
, len
);
2169 /* Assume that zero-extension, if available, is cheaper than a shift. */
2170 switch (ofs
+ len
) {
2172 if (TCG_TARGET_HAS_ext32u_i64
) {
2173 tcg_gen_ext32u_i64(ret
, arg
);
2174 tcg_gen_shri_i64(ret
, ret
, ofs
);
2179 if (TCG_TARGET_HAS_ext16u_i64
) {
2180 tcg_gen_ext16u_i64(ret
, arg
);
2181 tcg_gen_shri_i64(ret
, ret
, ofs
);
2186 if (TCG_TARGET_HAS_ext8u_i64
) {
2187 tcg_gen_ext8u_i64(ret
, arg
);
2188 tcg_gen_shri_i64(ret
, ret
, ofs
);
2194 /* ??? Ideally we'd know what values are available for immediate AND.
2195 Assume that 8 bits are available, plus the special cases of 16 and 32,
2196 so that we get ext8u, ext16u, and ext32u. */
2198 case 1 ... 8: case 16: case 32:
2200 tcg_gen_shri_i64(ret
, arg
, ofs
);
2201 tcg_gen_andi_i64(ret
, ret
, (1ull << len
) - 1);
2204 tcg_gen_shli_i64(ret
, arg
, 64 - len
- ofs
);
2205 tcg_gen_shri_i64(ret
, ret
, 64 - len
);
2210 void tcg_gen_sextract_i64(TCGv_i64 ret
, TCGv_i64 arg
,
2211 unsigned int ofs
, unsigned int len
)
2213 tcg_debug_assert(ofs
< 64);
2214 tcg_debug_assert(len
> 0);
2215 tcg_debug_assert(len
<= 64);
2216 tcg_debug_assert(ofs
+ len
<= 64);
2218 /* Canonicalize certain special cases, even if sextract is supported. */
2219 if (ofs
+ len
== 64) {
2220 tcg_gen_sari_i64(ret
, arg
, 64 - len
);
2226 tcg_gen_ext32s_i64(ret
, arg
);
2229 tcg_gen_ext16s_i64(ret
, arg
);
2232 tcg_gen_ext8s_i64(ret
, arg
);
2237 if (TCG_TARGET_REG_BITS
== 32) {
2238 /* Look for a 32-bit extract within one of the two words. */
2240 tcg_gen_sextract_i32(TCGV_LOW(ret
), TCGV_HIGH(arg
), ofs
- 32, len
);
2241 } else if (ofs
+ len
<= 32) {
2242 tcg_gen_sextract_i32(TCGV_LOW(ret
), TCGV_LOW(arg
), ofs
, len
);
2243 } else if (ofs
== 0) {
2244 tcg_gen_mov_i32(TCGV_LOW(ret
), TCGV_LOW(arg
));
2245 tcg_gen_sextract_i32(TCGV_HIGH(ret
), TCGV_HIGH(arg
), 0, len
- 32);
2247 } else if (len
> 32) {
2248 TCGv_i32 t
= tcg_temp_new_i32();
2249 /* Extract the bits for the high word normally. */
2250 tcg_gen_sextract_i32(t
, TCGV_HIGH(arg
), ofs
+ 32, len
- 32);
2251 /* Shift the field down for the low part. */
2252 tcg_gen_shri_i64(ret
, arg
, ofs
);
2253 /* Overwrite the shift into the high part. */
2254 tcg_gen_mov_i32(TCGV_HIGH(ret
), t
);
2255 tcg_temp_free_i32(t
);
2258 /* Shift the field down for the low part, such that the
2259 field sits at the MSB. */
2260 tcg_gen_shri_i64(ret
, arg
, ofs
+ len
- 32);
2261 /* Shift the field down from the MSB, sign extending. */
2262 tcg_gen_sari_i32(TCGV_LOW(ret
), TCGV_LOW(ret
), 32 - len
);
2264 /* Sign-extend the field from 32 bits. */
2265 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_LOW(ret
), 31);
2269 if (TCG_TARGET_HAS_sextract_i64
2270 && TCG_TARGET_extract_i64_valid(ofs
, len
)) {
2271 tcg_gen_op4ii_i64(INDEX_op_sextract_i64
, ret
, arg
, ofs
, len
);
2275 /* Assume that sign-extension, if available, is cheaper than a shift. */
2276 switch (ofs
+ len
) {
2278 if (TCG_TARGET_HAS_ext32s_i64
) {
2279 tcg_gen_ext32s_i64(ret
, arg
);
2280 tcg_gen_sari_i64(ret
, ret
, ofs
);
2285 if (TCG_TARGET_HAS_ext16s_i64
) {
2286 tcg_gen_ext16s_i64(ret
, arg
);
2287 tcg_gen_sari_i64(ret
, ret
, ofs
);
2292 if (TCG_TARGET_HAS_ext8s_i64
) {
2293 tcg_gen_ext8s_i64(ret
, arg
);
2294 tcg_gen_sari_i64(ret
, ret
, ofs
);
2301 if (TCG_TARGET_HAS_ext32s_i64
) {
2302 tcg_gen_shri_i64(ret
, arg
, ofs
);
2303 tcg_gen_ext32s_i64(ret
, ret
);
2308 if (TCG_TARGET_HAS_ext16s_i64
) {
2309 tcg_gen_shri_i64(ret
, arg
, ofs
);
2310 tcg_gen_ext16s_i64(ret
, ret
);
2315 if (TCG_TARGET_HAS_ext8s_i64
) {
2316 tcg_gen_shri_i64(ret
, arg
, ofs
);
2317 tcg_gen_ext8s_i64(ret
, ret
);
2322 tcg_gen_shli_i64(ret
, arg
, 64 - len
- ofs
);
2323 tcg_gen_sari_i64(ret
, ret
, 64 - len
);
2326 void tcg_gen_movcond_i64(TCGCond cond
, TCGv_i64 ret
, TCGv_i64 c1
,
2327 TCGv_i64 c2
, TCGv_i64 v1
, TCGv_i64 v2
)
2329 if (cond
== TCG_COND_ALWAYS
) {
2330 tcg_gen_mov_i64(ret
, v1
);
2331 } else if (cond
== TCG_COND_NEVER
) {
2332 tcg_gen_mov_i64(ret
, v2
);
2333 } else if (TCG_TARGET_REG_BITS
== 32) {
2334 TCGv_i32 t0
= tcg_temp_new_i32();
2335 TCGv_i32 t1
= tcg_temp_new_i32();
2336 tcg_gen_op6i_i32(INDEX_op_setcond2_i32
, t0
,
2337 TCGV_LOW(c1
), TCGV_HIGH(c1
),
2338 TCGV_LOW(c2
), TCGV_HIGH(c2
), cond
);
2340 if (TCG_TARGET_HAS_movcond_i32
) {
2341 tcg_gen_movi_i32(t1
, 0);
2342 tcg_gen_movcond_i32(TCG_COND_NE
, TCGV_LOW(ret
), t0
, t1
,
2343 TCGV_LOW(v1
), TCGV_LOW(v2
));
2344 tcg_gen_movcond_i32(TCG_COND_NE
, TCGV_HIGH(ret
), t0
, t1
,
2345 TCGV_HIGH(v1
), TCGV_HIGH(v2
));
2347 tcg_gen_neg_i32(t0
, t0
);
2349 tcg_gen_and_i32(t1
, TCGV_LOW(v1
), t0
);
2350 tcg_gen_andc_i32(TCGV_LOW(ret
), TCGV_LOW(v2
), t0
);
2351 tcg_gen_or_i32(TCGV_LOW(ret
), TCGV_LOW(ret
), t1
);
2353 tcg_gen_and_i32(t1
, TCGV_HIGH(v1
), t0
);
2354 tcg_gen_andc_i32(TCGV_HIGH(ret
), TCGV_HIGH(v2
), t0
);
2355 tcg_gen_or_i32(TCGV_HIGH(ret
), TCGV_HIGH(ret
), t1
);
2357 tcg_temp_free_i32(t0
);
2358 tcg_temp_free_i32(t1
);
2359 } else if (TCG_TARGET_HAS_movcond_i64
) {
2360 tcg_gen_op6i_i64(INDEX_op_movcond_i64
, ret
, c1
, c2
, v1
, v2
, cond
);
2362 TCGv_i64 t0
= tcg_temp_new_i64();
2363 TCGv_i64 t1
= tcg_temp_new_i64();
2364 tcg_gen_setcond_i64(cond
, t0
, c1
, c2
);
2365 tcg_gen_neg_i64(t0
, t0
);
2366 tcg_gen_and_i64(t1
, v1
, t0
);
2367 tcg_gen_andc_i64(ret
, v2
, t0
);
2368 tcg_gen_or_i64(ret
, ret
, t1
);
2369 tcg_temp_free_i64(t0
);
2370 tcg_temp_free_i64(t1
);
2374 void tcg_gen_add2_i64(TCGv_i64 rl
, TCGv_i64 rh
, TCGv_i64 al
,
2375 TCGv_i64 ah
, TCGv_i64 bl
, TCGv_i64 bh
)
2377 if (TCG_TARGET_HAS_add2_i64
) {
2378 tcg_gen_op6_i64(INDEX_op_add2_i64
, rl
, rh
, al
, ah
, bl
, bh
);
2380 TCGv_i64 t0
= tcg_temp_new_i64();
2381 TCGv_i64 t1
= tcg_temp_new_i64();
2382 tcg_gen_add_i64(t0
, al
, bl
);
2383 tcg_gen_setcond_i64(TCG_COND_LTU
, t1
, t0
, al
);
2384 tcg_gen_add_i64(rh
, ah
, bh
);
2385 tcg_gen_add_i64(rh
, rh
, t1
);
2386 tcg_gen_mov_i64(rl
, t0
);
2387 tcg_temp_free_i64(t0
);
2388 tcg_temp_free_i64(t1
);
2392 void tcg_gen_sub2_i64(TCGv_i64 rl
, TCGv_i64 rh
, TCGv_i64 al
,
2393 TCGv_i64 ah
, TCGv_i64 bl
, TCGv_i64 bh
)
2395 if (TCG_TARGET_HAS_sub2_i64
) {
2396 tcg_gen_op6_i64(INDEX_op_sub2_i64
, rl
, rh
, al
, ah
, bl
, bh
);
2398 TCGv_i64 t0
= tcg_temp_new_i64();
2399 TCGv_i64 t1
= tcg_temp_new_i64();
2400 tcg_gen_sub_i64(t0
, al
, bl
);
2401 tcg_gen_setcond_i64(TCG_COND_LTU
, t1
, al
, bl
);
2402 tcg_gen_sub_i64(rh
, ah
, bh
);
2403 tcg_gen_sub_i64(rh
, rh
, t1
);
2404 tcg_gen_mov_i64(rl
, t0
);
2405 tcg_temp_free_i64(t0
);
2406 tcg_temp_free_i64(t1
);
2410 void tcg_gen_mulu2_i64(TCGv_i64 rl
, TCGv_i64 rh
, TCGv_i64 arg1
, TCGv_i64 arg2
)
2412 if (TCG_TARGET_HAS_mulu2_i64
) {
2413 tcg_gen_op4_i64(INDEX_op_mulu2_i64
, rl
, rh
, arg1
, arg2
);
2414 } else if (TCG_TARGET_HAS_muluh_i64
) {
2415 TCGv_i64 t
= tcg_temp_new_i64();
2416 tcg_gen_op3_i64(INDEX_op_mul_i64
, t
, arg1
, arg2
);
2417 tcg_gen_op3_i64(INDEX_op_muluh_i64
, rh
, arg1
, arg2
);
2418 tcg_gen_mov_i64(rl
, t
);
2419 tcg_temp_free_i64(t
);
2421 TCGv_i64 t0
= tcg_temp_new_i64();
2422 tcg_gen_mul_i64(t0
, arg1
, arg2
);
2423 gen_helper_muluh_i64(rh
, arg1
, arg2
);
2424 tcg_gen_mov_i64(rl
, t0
);
2425 tcg_temp_free_i64(t0
);
2429 void tcg_gen_muls2_i64(TCGv_i64 rl
, TCGv_i64 rh
, TCGv_i64 arg1
, TCGv_i64 arg2
)
2431 if (TCG_TARGET_HAS_muls2_i64
) {
2432 tcg_gen_op4_i64(INDEX_op_muls2_i64
, rl
, rh
, arg1
, arg2
);
2433 } else if (TCG_TARGET_HAS_mulsh_i64
) {
2434 TCGv_i64 t
= tcg_temp_new_i64();
2435 tcg_gen_op3_i64(INDEX_op_mul_i64
, t
, arg1
, arg2
);
2436 tcg_gen_op3_i64(INDEX_op_mulsh_i64
, rh
, arg1
, arg2
);
2437 tcg_gen_mov_i64(rl
, t
);
2438 tcg_temp_free_i64(t
);
2439 } else if (TCG_TARGET_HAS_mulu2_i64
|| TCG_TARGET_HAS_muluh_i64
) {
2440 TCGv_i64 t0
= tcg_temp_new_i64();
2441 TCGv_i64 t1
= tcg_temp_new_i64();
2442 TCGv_i64 t2
= tcg_temp_new_i64();
2443 TCGv_i64 t3
= tcg_temp_new_i64();
2444 tcg_gen_mulu2_i64(t0
, t1
, arg1
, arg2
);
2445 /* Adjust for negative inputs. */
2446 tcg_gen_sari_i64(t2
, arg1
, 63);
2447 tcg_gen_sari_i64(t3
, arg2
, 63);
2448 tcg_gen_and_i64(t2
, t2
, arg2
);
2449 tcg_gen_and_i64(t3
, t3
, arg1
);
2450 tcg_gen_sub_i64(rh
, t1
, t2
);
2451 tcg_gen_sub_i64(rh
, rh
, t3
);
2452 tcg_gen_mov_i64(rl
, t0
);
2453 tcg_temp_free_i64(t0
);
2454 tcg_temp_free_i64(t1
);
2455 tcg_temp_free_i64(t2
);
2456 tcg_temp_free_i64(t3
);
2458 TCGv_i64 t0
= tcg_temp_new_i64();
2459 tcg_gen_mul_i64(t0
, arg1
, arg2
);
2460 gen_helper_mulsh_i64(rh
, arg1
, arg2
);
2461 tcg_gen_mov_i64(rl
, t0
);
2462 tcg_temp_free_i64(t0
);
2466 void tcg_gen_mulsu2_i64(TCGv_i64 rl
, TCGv_i64 rh
, TCGv_i64 arg1
, TCGv_i64 arg2
)
2468 TCGv_i64 t0
= tcg_temp_new_i64();
2469 TCGv_i64 t1
= tcg_temp_new_i64();
2470 TCGv_i64 t2
= tcg_temp_new_i64();
2471 tcg_gen_mulu2_i64(t0
, t1
, arg1
, arg2
);
2472 /* Adjust for negative input for the signed arg1. */
2473 tcg_gen_sari_i64(t2
, arg1
, 63);
2474 tcg_gen_and_i64(t2
, t2
, arg2
);
2475 tcg_gen_sub_i64(rh
, t1
, t2
);
2476 tcg_gen_mov_i64(rl
, t0
);
2477 tcg_temp_free_i64(t0
);
2478 tcg_temp_free_i64(t1
);
2479 tcg_temp_free_i64(t2
);
2482 /* Size changing operations. */
2484 void tcg_gen_extrl_i64_i32(TCGv_i32 ret
, TCGv_i64 arg
)
2486 if (TCG_TARGET_REG_BITS
== 32) {
2487 tcg_gen_mov_i32(ret
, TCGV_LOW(arg
));
2488 } else if (TCG_TARGET_HAS_extrl_i64_i32
) {
2489 tcg_gen_op2(&tcg_ctx
, INDEX_op_extrl_i64_i32
,
2490 GET_TCGV_I32(ret
), GET_TCGV_I64(arg
));
2492 tcg_gen_mov_i32(ret
, MAKE_TCGV_I32(GET_TCGV_I64(arg
)));
2496 void tcg_gen_extrh_i64_i32(TCGv_i32 ret
, TCGv_i64 arg
)
2498 if (TCG_TARGET_REG_BITS
== 32) {
2499 tcg_gen_mov_i32(ret
, TCGV_HIGH(arg
));
2500 } else if (TCG_TARGET_HAS_extrh_i64_i32
) {
2501 tcg_gen_op2(&tcg_ctx
, INDEX_op_extrh_i64_i32
,
2502 GET_TCGV_I32(ret
), GET_TCGV_I64(arg
));
2504 TCGv_i64 t
= tcg_temp_new_i64();
2505 tcg_gen_shri_i64(t
, arg
, 32);
2506 tcg_gen_mov_i32(ret
, MAKE_TCGV_I32(GET_TCGV_I64(t
)));
2507 tcg_temp_free_i64(t
);
2511 void tcg_gen_extu_i32_i64(TCGv_i64 ret
, TCGv_i32 arg
)
2513 if (TCG_TARGET_REG_BITS
== 32) {
2514 tcg_gen_mov_i32(TCGV_LOW(ret
), arg
);
2515 tcg_gen_movi_i32(TCGV_HIGH(ret
), 0);
2517 tcg_gen_op2(&tcg_ctx
, INDEX_op_extu_i32_i64
,
2518 GET_TCGV_I64(ret
), GET_TCGV_I32(arg
));
2522 void tcg_gen_ext_i32_i64(TCGv_i64 ret
, TCGv_i32 arg
)
2524 if (TCG_TARGET_REG_BITS
== 32) {
2525 tcg_gen_mov_i32(TCGV_LOW(ret
), arg
);
2526 tcg_gen_sari_i32(TCGV_HIGH(ret
), TCGV_LOW(ret
), 31);
2528 tcg_gen_op2(&tcg_ctx
, INDEX_op_ext_i32_i64
,
2529 GET_TCGV_I64(ret
), GET_TCGV_I32(arg
));
2533 void tcg_gen_concat_i32_i64(TCGv_i64 dest
, TCGv_i32 low
, TCGv_i32 high
)
2537 if (TCG_TARGET_REG_BITS
== 32) {
2538 tcg_gen_mov_i32(TCGV_LOW(dest
), low
);
2539 tcg_gen_mov_i32(TCGV_HIGH(dest
), high
);
2543 tmp
= tcg_temp_new_i64();
2544 /* These extensions are only needed for type correctness.
2545 We may be able to do better given target specific information. */
2546 tcg_gen_extu_i32_i64(tmp
, high
);
2547 tcg_gen_extu_i32_i64(dest
, low
);
2548 /* If deposit is available, use it. Otherwise use the extra
2549 knowledge that we have of the zero-extensions above. */
2550 if (TCG_TARGET_HAS_deposit_i64
&& TCG_TARGET_deposit_i64_valid(32, 32)) {
2551 tcg_gen_deposit_i64(dest
, dest
, tmp
, 32, 32);
2553 tcg_gen_shli_i64(tmp
, tmp
, 32);
2554 tcg_gen_or_i64(dest
, dest
, tmp
);
2556 tcg_temp_free_i64(tmp
);
2559 void tcg_gen_extr_i64_i32(TCGv_i32 lo
, TCGv_i32 hi
, TCGv_i64 arg
)
2561 if (TCG_TARGET_REG_BITS
== 32) {
2562 tcg_gen_mov_i32(lo
, TCGV_LOW(arg
));
2563 tcg_gen_mov_i32(hi
, TCGV_HIGH(arg
));
2565 tcg_gen_extrl_i64_i32(lo
, arg
);
2566 tcg_gen_extrh_i64_i32(hi
, arg
);
2570 void tcg_gen_extr32_i64(TCGv_i64 lo
, TCGv_i64 hi
, TCGv_i64 arg
)
2572 tcg_gen_ext32u_i64(lo
, arg
);
2573 tcg_gen_shri_i64(hi
, arg
, 32);
2576 /* QEMU specific operations. */
2578 void tcg_gen_goto_tb(unsigned idx
)
2580 /* We only support two chained exits. */
2581 tcg_debug_assert(idx
<= 1);
2582 #ifdef CONFIG_DEBUG_TCG
2583 /* Verify that we havn't seen this numbered exit before. */
2584 tcg_debug_assert((tcg_ctx
.goto_tb_issue_mask
& (1 << idx
)) == 0);
2585 tcg_ctx
.goto_tb_issue_mask
|= 1 << idx
;
2587 tcg_gen_op1i(INDEX_op_goto_tb
, idx
);
2590 static inline TCGMemOp
tcg_canonicalize_memop(TCGMemOp op
, bool is64
, bool st
)
2592 /* Trigger the asserts within as early as possible. */
2593 (void)get_alignment_bits(op
);
2595 switch (op
& MO_SIZE
) {
2618 static void gen_ldst_i32(TCGOpcode opc
, TCGv_i32 val
, TCGv addr
,
2619 TCGMemOp memop
, TCGArg idx
)
2621 TCGMemOpIdx oi
= make_memop_idx(memop
, idx
);
2622 #if TARGET_LONG_BITS == 32
2623 tcg_gen_op3i_i32(opc
, val
, addr
, oi
);
2625 if (TCG_TARGET_REG_BITS
== 32) {
2626 tcg_gen_op4i_i32(opc
, val
, TCGV_LOW(addr
), TCGV_HIGH(addr
), oi
);
2628 tcg_gen_op3(&tcg_ctx
, opc
, GET_TCGV_I32(val
), GET_TCGV_I64(addr
), oi
);
2633 static void gen_ldst_i64(TCGOpcode opc
, TCGv_i64 val
, TCGv addr
,
2634 TCGMemOp memop
, TCGArg idx
)
2636 TCGMemOpIdx oi
= make_memop_idx(memop
, idx
);
2637 #if TARGET_LONG_BITS == 32
2638 if (TCG_TARGET_REG_BITS
== 32) {
2639 tcg_gen_op4i_i32(opc
, TCGV_LOW(val
), TCGV_HIGH(val
), addr
, oi
);
2641 tcg_gen_op3(&tcg_ctx
, opc
, GET_TCGV_I64(val
), GET_TCGV_I32(addr
), oi
);
2644 if (TCG_TARGET_REG_BITS
== 32) {
2645 tcg_gen_op5i_i32(opc
, TCGV_LOW(val
), TCGV_HIGH(val
),
2646 TCGV_LOW(addr
), TCGV_HIGH(addr
), oi
);
2648 tcg_gen_op3i_i64(opc
, val
, addr
, oi
);
2653 void tcg_gen_qemu_ld_i32(TCGv_i32 val
, TCGv addr
, TCGArg idx
, TCGMemOp memop
)
2655 memop
= tcg_canonicalize_memop(memop
, 0, 0);
2656 trace_guest_mem_before_tcg(tcg_ctx
.cpu
, tcg_ctx
.tcg_env
,
2657 addr
, trace_mem_get_info(memop
, 0));
2658 gen_ldst_i32(INDEX_op_qemu_ld_i32
, val
, addr
, memop
, idx
);
2661 void tcg_gen_qemu_st_i32(TCGv_i32 val
, TCGv addr
, TCGArg idx
, TCGMemOp memop
)
2663 memop
= tcg_canonicalize_memop(memop
, 0, 1);
2664 trace_guest_mem_before_tcg(tcg_ctx
.cpu
, tcg_ctx
.tcg_env
,
2665 addr
, trace_mem_get_info(memop
, 1));
2666 gen_ldst_i32(INDEX_op_qemu_st_i32
, val
, addr
, memop
, idx
);
2669 void tcg_gen_qemu_ld_i64(TCGv_i64 val
, TCGv addr
, TCGArg idx
, TCGMemOp memop
)
2671 if (TCG_TARGET_REG_BITS
== 32 && (memop
& MO_SIZE
) < MO_64
) {
2672 tcg_gen_qemu_ld_i32(TCGV_LOW(val
), addr
, idx
, memop
);
2673 if (memop
& MO_SIGN
) {
2674 tcg_gen_sari_i32(TCGV_HIGH(val
), TCGV_LOW(val
), 31);
2676 tcg_gen_movi_i32(TCGV_HIGH(val
), 0);
2681 memop
= tcg_canonicalize_memop(memop
, 1, 0);
2682 trace_guest_mem_before_tcg(tcg_ctx
.cpu
, tcg_ctx
.tcg_env
,
2683 addr
, trace_mem_get_info(memop
, 0));
2684 gen_ldst_i64(INDEX_op_qemu_ld_i64
, val
, addr
, memop
, idx
);
2687 void tcg_gen_qemu_st_i64(TCGv_i64 val
, TCGv addr
, TCGArg idx
, TCGMemOp memop
)
2689 if (TCG_TARGET_REG_BITS
== 32 && (memop
& MO_SIZE
) < MO_64
) {
2690 tcg_gen_qemu_st_i32(TCGV_LOW(val
), addr
, idx
, memop
);
2694 memop
= tcg_canonicalize_memop(memop
, 1, 1);
2695 trace_guest_mem_before_tcg(tcg_ctx
.cpu
, tcg_ctx
.tcg_env
,
2696 addr
, trace_mem_get_info(memop
, 1));
2697 gen_ldst_i64(INDEX_op_qemu_st_i64
, val
, addr
, memop
, idx
);
2700 static void tcg_gen_ext_i32(TCGv_i32 ret
, TCGv_i32 val
, TCGMemOp opc
)
2702 switch (opc
& MO_SSIZE
) {
2704 tcg_gen_ext8s_i32(ret
, val
);
2707 tcg_gen_ext8u_i32(ret
, val
);
2710 tcg_gen_ext16s_i32(ret
, val
);
2713 tcg_gen_ext16u_i32(ret
, val
);
2716 tcg_gen_mov_i32(ret
, val
);
2721 static void tcg_gen_ext_i64(TCGv_i64 ret
, TCGv_i64 val
, TCGMemOp opc
)
2723 switch (opc
& MO_SSIZE
) {
2725 tcg_gen_ext8s_i64(ret
, val
);
2728 tcg_gen_ext8u_i64(ret
, val
);
2731 tcg_gen_ext16s_i64(ret
, val
);
2734 tcg_gen_ext16u_i64(ret
, val
);
2737 tcg_gen_ext32s_i64(ret
, val
);
2740 tcg_gen_ext32u_i64(ret
, val
);
2743 tcg_gen_mov_i64(ret
, val
);
/* Signatures of the out-of-line atomic helpers.  Under softmmu each
 * helper takes a trailing TCGMemOpIdx (as TCGv_i32); user-mode helpers
 * omit it.  The #else/#endif were dropped by extraction and restored.
 */
#ifdef CONFIG_SOFTMMU
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
                                  TCGv_i64, TCGv_i32);
#else
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
#endif
/* Include 64-bit table entries only when 64-bit atomics are available.  */
#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif
2770 static void * const table_cmpxchg
[16] = {
2771 [MO_8
] = gen_helper_atomic_cmpxchgb
,
2772 [MO_16
| MO_LE
] = gen_helper_atomic_cmpxchgw_le
,
2773 [MO_16
| MO_BE
] = gen_helper_atomic_cmpxchgw_be
,
2774 [MO_32
| MO_LE
] = gen_helper_atomic_cmpxchgl_le
,
2775 [MO_32
| MO_BE
] = gen_helper_atomic_cmpxchgl_be
,
2776 WITH_ATOMIC64([MO_64
| MO_LE
] = gen_helper_atomic_cmpxchgq_le
)
2777 WITH_ATOMIC64([MO_64
| MO_BE
] = gen_helper_atomic_cmpxchgq_be
)
2780 void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv
, TCGv addr
, TCGv_i32 cmpv
,
2781 TCGv_i32 newv
, TCGArg idx
, TCGMemOp memop
)
2783 memop
= tcg_canonicalize_memop(memop
, 0, 0);
2785 if (!parallel_cpus
) {
2786 TCGv_i32 t1
= tcg_temp_new_i32();
2787 TCGv_i32 t2
= tcg_temp_new_i32();
2789 tcg_gen_ext_i32(t2
, cmpv
, memop
& MO_SIZE
);
2791 tcg_gen_qemu_ld_i32(t1
, addr
, idx
, memop
& ~MO_SIGN
);
2792 tcg_gen_movcond_i32(TCG_COND_EQ
, t2
, t1
, t2
, newv
, t1
);
2793 tcg_gen_qemu_st_i32(t2
, addr
, idx
, memop
);
2794 tcg_temp_free_i32(t2
);
2796 if (memop
& MO_SIGN
) {
2797 tcg_gen_ext_i32(retv
, t1
, memop
);
2799 tcg_gen_mov_i32(retv
, t1
);
2801 tcg_temp_free_i32(t1
);
2803 gen_atomic_cx_i32 gen
;
2805 gen
= table_cmpxchg
[memop
& (MO_SIZE
| MO_BSWAP
)];
2806 tcg_debug_assert(gen
!= NULL
);
2808 #ifdef CONFIG_SOFTMMU
2810 TCGv_i32 oi
= tcg_const_i32(make_memop_idx(memop
& ~MO_SIGN
, idx
));
2811 gen(retv
, tcg_ctx
.tcg_env
, addr
, cmpv
, newv
, oi
);
2812 tcg_temp_free_i32(oi
);
2815 gen(retv
, tcg_ctx
.tcg_env
, addr
, cmpv
, newv
);
2818 if (memop
& MO_SIGN
) {
2819 tcg_gen_ext_i32(retv
, retv
, memop
);
2824 void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv
, TCGv addr
, TCGv_i64 cmpv
,
2825 TCGv_i64 newv
, TCGArg idx
, TCGMemOp memop
)
2827 memop
= tcg_canonicalize_memop(memop
, 1, 0);
2829 if (!parallel_cpus
) {
2830 TCGv_i64 t1
= tcg_temp_new_i64();
2831 TCGv_i64 t2
= tcg_temp_new_i64();
2833 tcg_gen_ext_i64(t2
, cmpv
, memop
& MO_SIZE
);
2835 tcg_gen_qemu_ld_i64(t1
, addr
, idx
, memop
& ~MO_SIGN
);
2836 tcg_gen_movcond_i64(TCG_COND_EQ
, t2
, t1
, t2
, newv
, t1
);
2837 tcg_gen_qemu_st_i64(t2
, addr
, idx
, memop
);
2838 tcg_temp_free_i64(t2
);
2840 if (memop
& MO_SIGN
) {
2841 tcg_gen_ext_i64(retv
, t1
, memop
);
2843 tcg_gen_mov_i64(retv
, t1
);
2845 tcg_temp_free_i64(t1
);
2846 } else if ((memop
& MO_SIZE
) == MO_64
) {
2847 #ifdef CONFIG_ATOMIC64
2848 gen_atomic_cx_i64 gen
;
2850 gen
= table_cmpxchg
[memop
& (MO_SIZE
| MO_BSWAP
)];
2851 tcg_debug_assert(gen
!= NULL
);
2853 #ifdef CONFIG_SOFTMMU
2855 TCGv_i32 oi
= tcg_const_i32(make_memop_idx(memop
, idx
));
2856 gen(retv
, tcg_ctx
.tcg_env
, addr
, cmpv
, newv
, oi
);
2857 tcg_temp_free_i32(oi
);
2860 gen(retv
, tcg_ctx
.tcg_env
, addr
, cmpv
, newv
);
2863 gen_helper_exit_atomic(tcg_ctx
.tcg_env
);
2864 /* Produce a result, so that we have a well-formed opcode stream
2865 with respect to uses of the result in the (dead) code following. */
2866 tcg_gen_movi_i64(retv
, 0);
2867 #endif /* CONFIG_ATOMIC64 */
2869 TCGv_i32 c32
= tcg_temp_new_i32();
2870 TCGv_i32 n32
= tcg_temp_new_i32();
2871 TCGv_i32 r32
= tcg_temp_new_i32();
2873 tcg_gen_extrl_i64_i32(c32
, cmpv
);
2874 tcg_gen_extrl_i64_i32(n32
, newv
);
2875 tcg_gen_atomic_cmpxchg_i32(r32
, addr
, c32
, n32
, idx
, memop
& ~MO_SIGN
);
2876 tcg_temp_free_i32(c32
);
2877 tcg_temp_free_i32(n32
);
2879 tcg_gen_extu_i32_i64(retv
, r32
);
2880 tcg_temp_free_i32(r32
);
2882 if (memop
& MO_SIGN
) {
2883 tcg_gen_ext_i64(retv
, retv
, memop
);
2888 static void do_nonatomic_op_i32(TCGv_i32 ret
, TCGv addr
, TCGv_i32 val
,
2889 TCGArg idx
, TCGMemOp memop
, bool new_val
,
2890 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
2892 TCGv_i32 t1
= tcg_temp_new_i32();
2893 TCGv_i32 t2
= tcg_temp_new_i32();
2895 memop
= tcg_canonicalize_memop(memop
, 0, 0);
2897 tcg_gen_qemu_ld_i32(t1
, addr
, idx
, memop
& ~MO_SIGN
);
2899 tcg_gen_qemu_st_i32(t2
, addr
, idx
, memop
);
2901 tcg_gen_ext_i32(ret
, (new_val
? t2
: t1
), memop
);
2902 tcg_temp_free_i32(t1
);
2903 tcg_temp_free_i32(t2
);
2906 static void do_atomic_op_i32(TCGv_i32 ret
, TCGv addr
, TCGv_i32 val
,
2907 TCGArg idx
, TCGMemOp memop
, void * const table
[])
2909 gen_atomic_op_i32 gen
;
2911 memop
= tcg_canonicalize_memop(memop
, 0, 0);
2913 gen
= table
[memop
& (MO_SIZE
| MO_BSWAP
)];
2914 tcg_debug_assert(gen
!= NULL
);
2916 #ifdef CONFIG_SOFTMMU
2918 TCGv_i32 oi
= tcg_const_i32(make_memop_idx(memop
& ~MO_SIGN
, idx
));
2919 gen(ret
, tcg_ctx
.tcg_env
, addr
, val
, oi
);
2920 tcg_temp_free_i32(oi
);
2923 gen(ret
, tcg_ctx
.tcg_env
, addr
, val
);
2926 if (memop
& MO_SIGN
) {
2927 tcg_gen_ext_i32(ret
, ret
, memop
);
2931 static void do_nonatomic_op_i64(TCGv_i64 ret
, TCGv addr
, TCGv_i64 val
,
2932 TCGArg idx
, TCGMemOp memop
, bool new_val
,
2933 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
2935 TCGv_i64 t1
= tcg_temp_new_i64();
2936 TCGv_i64 t2
= tcg_temp_new_i64();
2938 memop
= tcg_canonicalize_memop(memop
, 1, 0);
2940 tcg_gen_qemu_ld_i64(t1
, addr
, idx
, memop
& ~MO_SIGN
);
2942 tcg_gen_qemu_st_i64(t2
, addr
, idx
, memop
);
2944 tcg_gen_ext_i64(ret
, (new_val
? t2
: t1
), memop
);
2945 tcg_temp_free_i64(t1
);
2946 tcg_temp_free_i64(t2
);
2949 static void do_atomic_op_i64(TCGv_i64 ret
, TCGv addr
, TCGv_i64 val
,
2950 TCGArg idx
, TCGMemOp memop
, void * const table
[])
2952 memop
= tcg_canonicalize_memop(memop
, 1, 0);
2954 if ((memop
& MO_SIZE
) == MO_64
) {
2955 #ifdef CONFIG_ATOMIC64
2956 gen_atomic_op_i64 gen
;
2958 gen
= table
[memop
& (MO_SIZE
| MO_BSWAP
)];
2959 tcg_debug_assert(gen
!= NULL
);
2961 #ifdef CONFIG_SOFTMMU
2963 TCGv_i32 oi
= tcg_const_i32(make_memop_idx(memop
& ~MO_SIGN
, idx
));
2964 gen(ret
, tcg_ctx
.tcg_env
, addr
, val
, oi
);
2965 tcg_temp_free_i32(oi
);
2968 gen(ret
, tcg_ctx
.tcg_env
, addr
, val
);
2971 gen_helper_exit_atomic(tcg_ctx
.tcg_env
);
2972 /* Produce a result, so that we have a well-formed opcode stream
2973 with respect to uses of the result in the (dead) code following. */
2974 tcg_gen_movi_i64(ret
, 0);
2975 #endif /* CONFIG_ATOMIC64 */
2977 TCGv_i32 v32
= tcg_temp_new_i32();
2978 TCGv_i32 r32
= tcg_temp_new_i32();
2980 tcg_gen_extrl_i64_i32(v32
, val
);
2981 do_atomic_op_i32(r32
, addr
, v32
, idx
, memop
& ~MO_SIGN
, table
);
2982 tcg_temp_free_i32(v32
);
2984 tcg_gen_extu_i32_i64(ret
, r32
);
2985 tcg_temp_free_i32(r32
);
2987 if (memop
& MO_SIGN
) {
2988 tcg_gen_ext_i64(ret
, ret
, memop
);
/* Expand to a helper dispatch table plus the i32 and i64 front-end
 * functions for atomic operation NAME: atomic path in parallel mode,
 * inline tcg_gen_OP otherwise; NEW selects returning the new vs. old
 * value.  Brace/terminator continuation lines dropped by extraction
 * have been restored.
 */
#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[16] = {                                \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32                                        \
    (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
{                                                                       \
    if (parallel_cpus) {                                                \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64                                        \
    (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
{                                                                       \
    if (parallel_cpus) {                                                \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}
3024 GEN_ATOMIC_HELPER(fetch_add
, add
, 0)
3025 GEN_ATOMIC_HELPER(fetch_and
, and, 0)
3026 GEN_ATOMIC_HELPER(fetch_or
, or, 0)
3027 GEN_ATOMIC_HELPER(fetch_xor
, xor, 0)
3029 GEN_ATOMIC_HELPER(add_fetch
, add
, 1)
3030 GEN_ATOMIC_HELPER(and_fetch
, and, 1)
3031 GEN_ATOMIC_HELPER(or_fetch
, or, 1)
3032 GEN_ATOMIC_HELPER(xor_fetch
, xor, 1)
3034 static void tcg_gen_mov2_i32(TCGv_i32 r
, TCGv_i32 a
, TCGv_i32 b
)
3036 tcg_gen_mov_i32(r
, b
);
3039 static void tcg_gen_mov2_i64(TCGv_i64 r
, TCGv_i64 a
, TCGv_i64 b
)
3041 tcg_gen_mov_i64(r
, b
);
3044 GEN_ATOMIC_HELPER(xchg
, mov2
, 0)
3046 #undef GEN_ATOMIC_HELPER