/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

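/*
 * For illustration (not part of the build): CASE_OP_32_64(add) expands to
 *
 *     case INDEX_op_add_i32: case INDEX_op_add_i64
 *
 * so a single label covers both widths of the same logical operation,
 * and CASE_OP_32_64_VEC additionally covers the _vec form.
 */
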
typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
} TempOptInfo;

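/*
 * Worked example (a sketch): for a temp known to hold the constant 0x2,
 * z_mask is 0x2 (only bit 1 may be nonzero) and s_mask has the top 61
 * bits set, since clrsb64(0x2) == 61.  For a completely unknown value,
 * z_mask is -1 and s_mask is 0.
 */
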
typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    /* In flight values from optimization. */
    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    uint64_t s_mask;  /* mask of clrsb(value) bits */
    TCGType type;
} OptContext;

/* Calculate the smask for a specific value. */
static uint64_t smask_from_value(uint64_t value)
{
    int rep = clrsb64(value);
    return ~(~0ull >> rep);
}

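/*
 * Example (illustrative only): value = 0x000000000000ffff has
 * clrsb64 == 47 (the sign bit 0 is repeated in the next 47 bits),
 * so the returned smask is ~(~0ull >> 47), i.e. the top 47 bits set.
 */
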
/*
 * Calculate the smask for a given set of known-zeros.
 * If there are lots of zeros on the left, we can consider the remainder
 * an unsigned field, and thus the corresponding signed field is one bit
 * larger.
 */
static uint64_t smask_from_zmask(uint64_t zmask)
{
    /*
     * Only the 0 bits are significant for zmask, thus the msb itself
     * must be zero, else we have no sign information.
     */
    int rep = clz64(zmask);
    if (rep == 0) {
        return 0;
    }
    rep -= 1;
    return ~(~0ull >> rep);
}

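/*
 * Example (illustrative only): zmask = 0xffff means bits 16..63 are
 * known zero, so clz64 == 48; the value fits in an unsigned 16-bit
 * field, and as a signed field it needs one bit more, leaving
 * rep = 48 - 1 = 47 sign repetitions, matching smask_from_value(0xffff).
 */
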
/*
 * Recreate a properly left-aligned smask after manipulation.
 * Some bit-shuffling, particularly shifts and rotates, may
 * retain sign bits on the left, but may scatter disconnected
 * sign bits on the right.  Retain only what remains to the left.
 */
static uint64_t smask_from_smask(int64_t smask)
{
    /* Only the 1 bits are significant for smask */
    return smask_from_zmask(~smask);
}

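/*
 * Example (illustrative only): a rotate can leave stray "sign" bits on
 * the right that no longer describe repetitions of the sign bit.  ~smask
 * is zero exactly across the leading 1-run of smask, so
 * smask_from_zmask(~smask) rebuilds a left-aligned mask of just that run.
 */
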
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TempOptInfo *pi = ts_info(ti->prev_copy);
    TempOptInfo *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;
}

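/*
 * Note for readers: each temp is a node on a doubly linked circular list
 * of copies.  A temp that is not a copy of anything points back at itself
 * (next_copy == prev_copy == ts), which is also the state reset_ts()
 * restores after unlinking the node; compare ts_is_copy() above.
 */
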
static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        ti->s_mask = smask_from_value(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i, *g, *l;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    g = l = NULL;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (temp_readonly(i)) {
            return i;
        } else if (i->kind > ts->kind) {
            if (i->kind == TEMP_GLOBAL) {
                g = i;
            } else if (i->kind == TEMP_TB) {
                l = i;
            }
        }
    }

    /* If we didn't find a better representation, return the same temp. */
    return g ? g : l ? l : ts;
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    TCGTemp *tv;

    if (ctx->type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* Convert movi to mov with constant temp. */
    tv = tcg_constant_internal(ctx->type, val);
    init_ts_info(ctx, tv);
    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}

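/*
 * For example (a sketch): folding "add_i32 t2, t0, t1" when both inputs
 * are known constants ends here, rewriting the op as "mov_i32 t2, $c"
 * where $c is a shared TEMP_CONST temp from tcg_constant_internal().
 */
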
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64_VEC(add):
        return x + y;

    CASE_OP_32_64_VEC(sub):
        return x - y;

    CASE_OP_32_64_VEC(mul):
        return x * y;

    CASE_OP_32_64_VEC(and):
        return x & y;

    CASE_OP_32_64_VEC(or):
        return x | y;

    CASE_OP_32_64_VEC(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64_VEC(andc):
        return x & ~y;

    CASE_OP_32_64_VEC(orc):
        return x | ~y;

    CASE_OP_32_64_VEC(eqv):
        return ~(x ^ y);

    CASE_OP_32_64_VEC(nand):
        return ~(x & y);

    CASE_OP_32_64_VEC(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

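/*
 * Worked example (illustrative only): for INDEX_op_shl_i32 with x = 3
 * and y = 34, the shift count is masked to 34 & 31 = 2, so the folded
 * result is (uint32_t)3 << 2 = 12, matching the target semantics of a
 * 32-bit shift rather than C's undefined out-of-range shift.
 */
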
static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        g_assert_not_reached();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        g_assert_not_reached();
    }
}

static bool do_constant_folding_cond_eq(TCGCond c)
{
    /* The result of comparing a value with itself. */
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && arg_info(y)->val == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

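/*
 * Examples (illustrative only): "add t0, $5, t1" becomes "add t0, t1, $5"
 * because the constant is preferred in the second slot; "and t0, t1, t0"
 * becomes "and t0, t0, t1" because *p2 matches @dest, yielding the
 * "op a, a, b" form that is cheaper on two-operand hosts.
 */
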
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    TCGContext *s = ctx->tcg;

    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(s, ts));
        }
    }
}

static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    /*
     * For an opcode that ends a BB, reset all temp data.
     * We do no cross-BB optimization.
     */
    if (def->flags & TCG_OPF_BB_END) {
        memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
        ctx->prev_mb = NULL;
        return;
    }

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ts);
        /*
         * Save the corresponding known-zero/sign bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            ts_info(ts)->z_mask = ctx->z_mask;
            ts_info(ts)->s_mask = ctx->s_mask;
        }
    }
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    uint64_t a_mask = ctx->a_mask;
    uint64_t z_mask = ctx->z_mask;
    uint64_t s_mask = ctx->s_mask;

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended.  Certainly that's how we
     * represent our constants elsewhere.  Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (int32_t)a_mask;
        z_mask = (int32_t)z_mask;
        s_mask |= MAKE_64BIT_MASK(32, 32);
        ctx->z_mask = z_mask;
        ctx->s_mask = s_mask;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

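/*
 * Example (a sketch): for "and_i32 t0, t1, $0xff" where t1 is already
 * known to fit in 8 bits, fold_and() below computes a_mask == 0 (no bit
 * of the result can differ from t1), and fold_masks() then replaces the
 * op with "mov_i32 t0, t1".  If instead z_mask == 0, the result is the
 * constant 0.
 */
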
/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

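/*
 * Examples (illustrative only) of the helpers above:
 *   sub t0, t1, t1   -> movi t0, 0      (fold_xx_to_i)
 *   or  t0, t1, t1   -> mov  t0, t1     (fold_xx_to_x)
 *   and t0, t1, $0   -> movi t0, 0      (fold_xi_to_i)
 *   xor t0, t1, $-1  -> not  t0, t1     (fold_xi_to_not)
 */
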
/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
        arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }
    return false;
}

static bool fold_add2(OptContext *ctx, TCGOp *op)
{
    /* Note that the high and low parts may be independently swapped. */
    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
    swap_commutative(op->args[1], &op->args[3], &op->args[5]);

    return fold_addsub2(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;
    z2 = arg_info(op->args[2])->z_mask;
    ctx->z_mask = z1 & z2;

    /*
     * Sign repetitions are perforce all identical, whether they are 1 or 0.
     * Bitwise operations preserve the relative quantity of the repetitions.
     */
    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
    if (arg_is_const(op->args[2])) {
        ctx->a_mask = z1 & ~z2;
    }

    return fold_masks(ctx, op);
}

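/*
 * Worked example (illustrative only): if arg1 has z_mask 0x0f0f and
 * arg2 is the constant 0x00ff, the result's z_mask is 0x000f.  The
 * affected bits are z1 & ~z2 = 0x0f00: if arg1 happens to be zero in
 * those bits, the AND is a no-op and fold_masks() emits a mov.
 */
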
static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    uint64_t z1;

    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_ix_to_not(ctx, op, -1)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer anything from it.
     */
    if (arg_is_const(op->args[2])) {
        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
        ctx->a_mask = z1 & ~z2;
        z1 &= z2;
    }
    ctx->z_mask = z1;

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return fold_masks(ctx, op);
}

static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[2];
    int i;

    if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
        op->args[2] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
    if (i == 0) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }
    if (i > 0) {
        op->opc = INDEX_op_br;
        op->args[0] = op->args[3];
    }
    return false;
}

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[4];
    TCGArg label = op->args[5];
    int i, inv = 0;

    if (swap_commutative2(&op->args[0], &op->args[2])) {
        op->args[4] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
            arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            op->opc = INDEX_op_brcond_i32;
            op->args[1] = op->args[2];
            op->args[2] = cond;
            op->args[3] = label;
            break;
        }
        break;

    default:
        break;

    do_brcond_high:
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        break;

    do_brcond_const:
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        break;
    }
    return false;
}

static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask, sign;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        z_mask = bswap16(z_mask);
        sign = INT16_MIN;
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        z_mask = bswap32(z_mask);
        sign = INT32_MIN;
        break;
    case INDEX_op_bswap64_i64:
        z_mask = bswap64(z_mask);
        sign = INT64_MIN;
        break;
    default:
        g_assert_not_reached();
    }
    s_mask = smask_from_zmask(z_mask);

    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
    case TCG_BSWAP_OZ:
        break;
    case TCG_BSWAP_OS:
        /* If the sign bit may be 1, force all the bits above to 1. */
        if (z_mask & sign) {
            z_mask |= sign;
            s_mask = sign << 1;
        }
        break;
    default:
        /* The high bits are undefined: force all bits above the sign to 1. */
        z_mask |= sign << 1;
        s_mask = 0;
        break;
    }
    ctx->z_mask = z_mask;
    ctx->s_mask = s_mask;

    return fold_masks(ctx, op);
}

static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(&ctx->tcg->temps[i]);
            }
        }
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}

static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, ctx->type, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        z_mask = 31;
        break;
    case TCG_TYPE_I64:
        z_mask = 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
    ctx->s_mask = smask_from_zmask(ctx->z_mask);
    return false;
}

static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        ctx->z_mask = 32 | 31;
        break;
    case TCG_TYPE_I64:
        ctx->z_mask = 64 | 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->s_mask = smask_from_zmask(ctx->z_mask);
    return false;
}

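/*
 * Note (a sketch of the reasoning): a 32-bit popcount yields 0..32, so
 * any result bit outside 32 | 31 == 0x3f is known zero; likewise
 * 64 | 63 == 0x7f covers the range 0..64 for the 64-bit case.
 */
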
static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = deposit64(t1, op->args[3], op->args[4], t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }

    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
                            op->args[3], op->args[4],
                            arg_info(op->args[2])->z_mask);
    return false;
}

static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return false;
}

static bool fold_dup(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;
        t = dup_const(TCGOP_VECE(op), t);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_dup2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
                               arg_info(op->args[2])->val);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    if (args_are_copies(op->args[1], op->args[2])) {
        op->opc = INDEX_op_dup_vec;
        TCGOP_VECE(op) = MO_32;
    }
    return false;
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_extract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    int pos = op->args[2];
    int len = op->args[3];

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = extract64(t, pos, len);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask_old = arg_info(op->args[1])->z_mask;
    z_mask = extract64(z_mask_old, pos, len);
    if (pos == 0) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;
    ctx->s_mask = smask_from_zmask(z_mask);

    return fold_masks(ctx, op);
}

static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t v1 = arg_info(op->args[1])->val;
        uint64_t v2 = arg_info(op->args[2])->val;
        int shr = op->args[3];

        if (op->opc == INDEX_op_extract2_i64) {
            v1 >>= shr;
            v2 <<= 64 - shr;
        } else {
            v1 = (uint32_t)v1 >> shr;
            v2 = (uint64_t)((int32_t)v2 << (32 - shr));
        }
        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
    }
    return false;
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask_old, s_mask, z_mask, sign;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask = arg_info(op->args[1])->z_mask;
    s_mask = arg_info(op->args[1])->s_mask;
    s_mask_old = s_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8s):
        sign = INT8_MIN;
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16s):
        sign = INT16_MIN;
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_ext_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32s_i64:
        sign = INT32_MIN;
        z_mask = (uint32_t)z_mask;
        break;
    default:
        g_assert_not_reached();
    }

    if (z_mask & sign) {
        z_mask |= sign;
    }
    s_mask |= sign << 1;

    ctx->z_mask = z_mask;
    ctx->s_mask = s_mask;
    if (!type_change) {
        ctx->a_mask = s_mask & ~s_mask_old;
    }

    return fold_masks(ctx, op);
}

static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8u):
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16u):
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extu_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32u_i64:
        z_mask = (uint32_t)z_mask;
        break;
    case INDEX_op_extrh_i64_i32:
        type_change = true;
        z_mask >>= 32;
        break;
    default:
        g_assert_not_reached();
    }

    ctx->z_mask = z_mask;
    ctx->s_mask = smask_from_zmask(z_mask);
    if (!type_change) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    return fold_masks(ctx, op);
}

static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
    if (ctx->prev_mb) {
        /*
         * Merge two barriers of the same type into one,
         * or a weaker barrier into a stronger one,
         * or two weaker barriers into a stronger one.
         *   mb X; mb Y => mb X|Y
         *   mb; strl => mb; st
         *   ldaq; mb => ld; mb
         *   ldaq; strl => ld; mb; st
         * Other combinations are also merged into a strong
         * barrier.  This is stricter than specified but for
         * the purposes of TCG is better than not optimizing.
         */
        ctx->prev_mb->args[0] |= op->args[0];
        tcg_op_remove(ctx->tcg, op);
    } else {
        ctx->prev_mb = op;
    }
    return true;
}

static bool fold_mov(OptContext *ctx, TCGOp *op)
{
    return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}

static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i;

    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
        op->args[5] = cond = tcg_swap_cond(cond);
    }
    /*
     * Canonicalize the "false" input reg to match the destination reg so
     * that the tcg backend can implement a "move if true" operation.
     */
    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
        op->args[5] = cond = tcg_invert_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
    if (i >= 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
    }

    ctx->z_mask = arg_info(op->args[3])->z_mask
                | arg_info(op->args[4])->z_mask;
    ctx->s_mask = arg_info(op->args[3])->s_mask
                & arg_info(op->args[4])->s_mask;

    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
        uint64_t tv = arg_info(op->args[3])->val;
        uint64_t fv = arg_info(op->args[4])->val;
        TCGOpcode opc;

        switch (ctx->type) {
        case TCG_TYPE_I32:
            opc = INDEX_op_setcond_i32;
            break;
        case TCG_TYPE_I64:
            opc = INDEX_op_setcond_i64;
            break;
        default:
            g_assert_not_reached();
        }

        if (tv == 1 && fv == 0) {
            op->opc = opc;
            op->args[3] = cond;
        } else if (fv == 1 && tv == 0) {
            op->opc = opc;
            op->args[3] = tcg_invert_cond(cond);
        }
    }
    return false;
}

static bool fold_mul(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return false;
}

static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[2], &op->args[3]);

    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint64_t a = arg_info(op->args[2])->val;
        uint64_t b = arg_info(op->args[3])->val;
        uint64_t h, l;
        TCGArg rl, rh;
        TCGOp *op2;

        switch (op->opc) {
        case INDEX_op_mulu2_i32:
            l = (uint64_t)(uint32_t)a * (uint32_t)b;
            h = (int32_t)(l >> 32);
            l = (int32_t)l;
            break;
        case INDEX_op_muls2_i32:
            l = (int64_t)(int32_t)a * (int32_t)b;
            h = l >> 32;
            l = (int32_t)l;
            break;
        case INDEX_op_mulu2_i64:
            mulu64(&l, &h, a, b);
            break;
        case INDEX_op_muls2_i64:
            muls64(&l, &h, a, b);
            break;
        default:
            g_assert_not_reached();
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, l);
        tcg_opt_gen_movi(ctx, op2, rh, h);
        return true;
    }
    return false;
}

static bool fold_nand(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_neg(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (fold_const1(ctx, op)) {
        return true;
    }

    /* Set to 1 all bits to the left of the rightmost. */
    z_mask = arg_info(op->args[1])->z_mask;
    ctx->z_mask = -(z_mask & -z_mask);

    /*
     * Because of fold_sub_to_neg, we want to always return true,
     * via finish_folding.
     */
    finish_folding(ctx, op);
    return true;
}

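/*
 * Worked example (illustrative only): if z_mask is 0b1100, the lowest
 * possible set bit of the input is bit 2, so z_mask & -z_mask == 0b100,
 * and -(z_mask & -z_mask) sets every bit from bit 2 upward: negation
 * can only make bits at or above the lowest possible input bit nonzero,
 * while bits 0..1 remain known zero.
 */
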
static bool fold_nor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_not(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask;

    /* Because of fold_to_not, we want to always return true, via finish. */
    finish_folding(ctx, op);
    return true;
}

static bool fold_or(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    ctx->z_mask = arg_info(op->args[1])->z_mask
                | arg_info(op->args[2])->z_mask;
    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return fold_masks(ctx, op);
}

static bool fold_orc(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, -1) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_ix_to_not(ctx, op, 0)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
    MemOp mop = get_memop(oi);
    int width = 8 * memop_size(mop);

    if (width < 64) {
        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
        if (!(mop & MO_SIGN)) {
            ctx->z_mask = MAKE_64BIT_MASK(0, width);
            ctx->s_mask <<= 1;
        }
    }

    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_setcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[3];
    int i;

    if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
        op->args[3] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
    if (i >= 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }

    ctx->z_mask = 1;
    ctx->s_mask = smask_from_zmask(1);
    return false;
}

static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i, inv = 0;

    if (swap_commutative2(&op->args[1], &op->args[3])) {
        op->args[5] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
    if (i >= 0) {
        goto do_setcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
            arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
            goto do_setcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
                                     op->args[4], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            op->args[2] = op->args[3];
            op->args[3] = cond;
            op->opc = INDEX_op_setcond_i32;
            break;
        }
        break;

    default:
        break;

    do_setcond_high:
        op->args[1] = op->args[2];
        op->args[2] = op->args[4];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        break;
    }

    ctx->z_mask = 1;
    ctx->s_mask = smask_from_zmask(1);
    return false;

 do_setcond_const:
    return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}

static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask, s_mask_old;
    int pos = op->args[2];
    int len = op->args[3];

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = sextract64(t, pos, len);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask = arg_info(op->args[1])->z_mask;
    z_mask = sextract64(z_mask, pos, len);
    ctx->z_mask = z_mask;

    s_mask_old = arg_info(op->args[1])->s_mask;
    s_mask = sextract64(s_mask_old, pos, len);
    s_mask |= MAKE_64BIT_MASK(len, 64 - len);
    ctx->s_mask = s_mask;

    if (pos == 0) {
        ctx->a_mask = s_mask & ~s_mask_old;
    }

    return fold_masks(ctx, op);
}

static bool fold_shift(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask, z_mask, sign;

    if (fold_const2(ctx, op) ||
        fold_ix_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }

    s_mask = arg_info(op->args[1])->s_mask;
    z_mask = arg_info(op->args[1])->z_mask;

    if (arg_is_const(op->args[2])) {
        int sh = arg_info(op->args[2])->val;

        ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);

        s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
        ctx->s_mask = smask_from_smask(s_mask);

        return fold_masks(ctx, op);
    }

    switch (op->opc) {
    CASE_OP_32_64(sar):
        /*
         * Arithmetic right shift will not reduce the number of
         * input sign repetitions.
         */
        ctx->s_mask = s_mask;
        break;
    CASE_OP_32_64(shr):
        /*
         * If the sign bit is known zero, then logical right shift
         * will not reduce the number of input sign repetitions.
         */
        sign = (s_mask & -s_mask) >> 1;
        if (!(z_mask & sign)) {
            ctx->s_mask = s_mask;
        }
        break;
    default:
        break;
    }

    return false;
}

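/*
 * Example (a sketch): with a constant shift count, the masks are simply
 * folded through the same opcode as the value would be; for a variable
 * logical right shift, if the run of sign repetitions covers a sign bit
 * that z_mask shows to be known zero, shifting in zeros from the left
 * cannot shorten that run, so s_mask is preserved.
 */
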
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
{
    TCGOpcode neg_op;
    bool have_neg;

    if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
        return false;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        neg_op = INDEX_op_neg_i32;
        have_neg = TCG_TARGET_HAS_neg_i32;
        break;
    case TCG_TYPE_I64:
        neg_op = INDEX_op_neg_i64;
        have_neg = TCG_TARGET_HAS_neg_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        neg_op = INDEX_op_neg_vec;
        have_neg = (TCG_TARGET_HAS_neg_vec &&
                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
        break;
    default:
        g_assert_not_reached();
    }
    if (have_neg) {
        op->opc = neg_op;
        op->args[1] = op->args[2];
        return fold_neg(ctx, op);
    }
    return false;
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_sub_to_neg(ctx, op)) {
        return true;
    }
    return false;
}

static bool fold_sub(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op) || fold_sub_vec(ctx, op);
}

static bool fold_sub2(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2(ctx, op, false);
}

static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
    /* We can't do any folding with a load, but we can record bits. */
    switch (op->opc) {
    CASE_OP_32_64(ld8s):
        ctx->s_mask = MAKE_64BIT_MASK(8, 56);
        break;
    CASE_OP_32_64(ld8u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
        break;
    CASE_OP_32_64(ld16s):
        ctx->s_mask = MAKE_64BIT_MASK(16, 48);
        break;
    CASE_OP_32_64(ld16u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
        break;
    case INDEX_op_ld32s_i64:
        ctx->s_mask = MAKE_64BIT_MASK(32, 32);
        break;
    case INDEX_op_ld32u_i64:
        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    ctx->z_mask = arg_info(op->args[1])->z_mask
                | arg_info(op->args[2])->z_mask;
    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return fold_masks(ctx, op);
}

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    /* Array VALS has an element for each temp.
       If this temp holds a constant then its value is kept in VALS' element.
       If this temp is a copy of other ones then the other copies are
       available through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

        /* Pre-compute the type of the operation. */
        if (def->flags & TCG_OPF_VECTOR) {
            ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
        } else if (def->flags & TCG_OPF_64BIT) {
            ctx.type = TCG_TYPE_I64;
        } else {
            ctx.type = TCG_TYPE_I32;
        }

        /* Assume all bits affected, no bits known zero, no sign reps. */
        ctx.a_mask = -1;
        ctx.z_mask = -1;
        ctx.s_mask = 0;

        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        CASE_OP_32_64(add):
            done = fold_add(&ctx, op);
            break;
        case INDEX_op_add_vec:
            done = fold_add_vec(&ctx, op);
            break;
        CASE_OP_32_64(add2):
            done = fold_add2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(and):
            done = fold_and(&ctx, op);
            break;
        CASE_OP_32_64_VEC(andc):
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(brcond):
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
            break;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            done = fold_count_zeros(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(eqv):
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
            done = fold_extract(&ctx, op);
            break;
        CASE_OP_32_64(extract2):
            done = fold_extract2(&ctx, op);
            break;
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext16s):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        CASE_OP_32_64(ld8s):
        CASE_OP_32_64(ld8u):
        CASE_OP_32_64(ld16s):
        CASE_OP_32_64(ld16u):
        case INDEX_op_ld32s_i64:
        case INDEX_op_ld32u_i64:
            done = fold_tcg_ld(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        CASE_OP_32_64_VEC(mov):
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64_VEC(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64_VEC(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        CASE_OP_32_64_VEC(or):
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_a32_i32:
        case INDEX_op_qemu_ld_a64_i32:
        case INDEX_op_qemu_ld_a32_i64:
        case INDEX_op_qemu_ld_a64_i64:
        case INDEX_op_qemu_ld_a32_i128:
        case INDEX_op_qemu_ld_a64_i128:
            done = fold_qemu_ld(&ctx, op);
            break;
        case INDEX_op_qemu_st8_a32_i32:
        case INDEX_op_qemu_st8_a64_i32:
        case INDEX_op_qemu_st_a32_i32:
        case INDEX_op_qemu_st_a64_i32:
        case INDEX_op_qemu_st_a32_i64:
        case INDEX_op_qemu_st_a64_i64:
        case INDEX_op_qemu_st_a32_i128:
        case INDEX_op_qemu_st_a64_i128:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        CASE_OP_32_64(sub):
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_sub_vec:
            done = fold_sub_vec(&ctx, op);
            break;
        CASE_OP_32_64(sub2):
            done = fold_sub2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
        default:
            break;
        }

        if (!done) {
            finish_folding(&ctx, op);
        }
    }
}