/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

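/*
 * For example, CASE_OP_32_64(add) expands to
 *     case INDEX_op_add_i32: case INDEX_op_add_i64
 * so a single label in the switches below covers both operand widths,
 * and CASE_OP_32_64_VEC additionally covers the vector form of the opcode.
 */
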
typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY (MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
} TempOptInfo;

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In flight values from optimization. */
    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    uint64_t s_mask;  /* mask of clrsb(value) bits */
    TCGType type;
} OptContext;

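/*
 * The three in-flight masks describe the value produced by the current op:
 * a zero bit in a_mask means that bit is provably identical to the first
 * input, and a zero bit in z_mask means that bit is provably zero.  When
 * a_mask or z_mask becomes all-zero, fold_masks() below replaces the op
 * with a mov of the first input or a movi of zero, respectively.
 */
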
/* Calculate the smask for a specific value. */
static uint64_t smask_from_value(uint64_t value)
{
    int rep = clrsb64(value);
    return ~(~0ull >> rep);
}

/*
 * Calculate the smask for a given set of known-zeros.
 * If there are lots of zeros on the left, we can consider the remainder
 * an unsigned field, and thus the corresponding signed field is one bit
 * larger.
 */
static uint64_t smask_from_zmask(uint64_t zmask)
{
    /*
     * Only the 0 bits are significant for zmask, thus the msb itself
     * must be zero, else we have no sign information.
     */
    int rep = clz64(zmask);
    if (rep == 0) {
        return 0;
    }
    rep -= 1;
    return ~(~0ull >> rep);
}

/*
 * Recreate a properly left-aligned smask after manipulation.
 * Some bit-shuffling, particularly shifts and rotates, may
 * retain sign bits on the left, but may scatter disconnected
 * sign bits on the right. Retain only what remains to the left.
 */
static uint64_t smask_from_smask(int64_t smask)
{
    /* Only the 1 bits are significant for smask */
    return smask_from_zmask(~smask);
}

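/*
 * Worked example for smask_from_zmask: with zmask == 0xffff (bits 16..63
 * known zero), clz64 returns 48, rep becomes 47, and the result has bits
 * 17..63 set -- i.e. at least 47 sign-bit repetitions are guaranteed.
 */
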
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}

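/*
 * cmp_better_copy() prefers the temp whose kind has the larger enumeration
 * value, i.e. the longer-lived one, so that a global or constant temp is
 * chosen as the canonical copy over a TB- or EBB-local temp when several
 * temps hold the same value.
 */
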
/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        ti->s_mask = smask_from_value(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

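/*
 * Note that remove_mem_copy_in(ctx, 0, -1) passes -1 as the "last" offset,
 * which as an unsigned interval bound covers the whole offset range, so
 * every tracked memory copy is discarded.
 */
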
static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;

            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

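/*
 * Copies are tracked on a circular doubly-linked list threaded through
 * TempOptInfo.next_copy/prev_copy; a temp whose next_copy points back to
 * itself has no other copies, which is what ts_is_copy() tests above.
 */
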
static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64_VEC(and):
        return x & y;

    CASE_OP_32_64_VEC(or):
        return x | y;

    CASE_OP_32_64_VEC(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64_VEC(andc):
        return x & ~y;

    CASE_OP_32_64_VEC(orc):
        return x | ~y;

    CASE_OP_32_64_VEC(eqv):
        return ~(x ^ y);

    CASE_OP_32_64_VEC(nand):
        return ~(x & y);

    CASE_OP_32_64_VEC(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

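/*
 * For instance, do_constant_folding_2(INDEX_op_shl_i32, 3, 4) takes the
 * i32 shift case above and yields 48; the shift count is masked with 31
 * (63 for the 64-bit ops) so that an out-of-range count cannot trigger
 * undefined behaviour in the compiler itself.
 */
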
static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        g_assert_not_reached();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        g_assert_not_reached();
    }
}

static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && arg_info(y)->val == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;

    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;

    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

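/*
 * Example: for "add t0, $5, t1", swap_commutative() moves the constant to
 * the second operand slot, giving "add t0, t1, $5"; swap_commutative2()
 * does the same for the double-word comparison pairs used by brcond2 and
 * setcond2 below.
 */
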
788 static void init_arguments(OptContext
*ctx
, TCGOp
*op
, int nb_args
)
790 for (int i
= 0; i
< nb_args
; i
++) {
791 TCGTemp
*ts
= arg_temp(op
->args
[i
]);
792 init_ts_info(ctx
, ts
);
796 static void copy_propagate(OptContext
*ctx
, TCGOp
*op
,
797 int nb_oargs
, int nb_iargs
)
799 for (int i
= nb_oargs
; i
< nb_oargs
+ nb_iargs
; i
++) {
800 TCGTemp
*ts
= arg_temp(op
->args
[i
]);
801 if (ts_is_copy(ts
)) {
802 op
->args
[i
] = temp_arg(find_better_copy(ts
));
807 static void finish_folding(OptContext
*ctx
, TCGOp
*op
)
809 const TCGOpDef
*def
= &tcg_op_defs
[op
->opc
];
813 * We only optimize extended basic blocks. If the opcode ends a BB
814 * and is not a conditional branch, reset all temp data.
816 if (def
->flags
& TCG_OPF_BB_END
) {
818 if (!(def
->flags
& TCG_OPF_COND_BRANCH
)) {
819 memset(&ctx
->temps_used
, 0, sizeof(ctx
->temps_used
));
820 remove_mem_copy_all(ctx
);
825 nb_oargs
= def
->nb_oargs
;
826 for (i
= 0; i
< nb_oargs
; i
++) {
827 TCGTemp
*ts
= arg_temp(op
->args
[i
]);
830 * Save the corresponding known-zero/sign bits mask for the
831 * first output argument (only one supported so far).
834 ts_info(ts
)->z_mask
= ctx
->z_mask
;
835 ts_info(ts
)->s_mask
= ctx
->s_mask
;
841 * The fold_* functions return true when processing is complete,
842 * usually by folding the operation to a constant or to a copy,
843 * and calling tcg_opt_gen_{mov,movi}. They may do other things,
844 * like collect information about the value produced, for use in
845 * optimizing a subsequent operation.
847 * These first fold_* functions are all helpers, used by other
848 * folders for more specific operations.
851 static bool fold_const1(OptContext
*ctx
, TCGOp
*op
)
853 if (arg_is_const(op
->args
[1])) {
856 t
= arg_info(op
->args
[1])->val
;
857 t
= do_constant_folding(op
->opc
, ctx
->type
, t
, 0);
858 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t
);
863 static bool fold_const2(OptContext
*ctx
, TCGOp
*op
)
865 if (arg_is_const(op
->args
[1]) && arg_is_const(op
->args
[2])) {
866 uint64_t t1
= arg_info(op
->args
[1])->val
;
867 uint64_t t2
= arg_info(op
->args
[2])->val
;
869 t1
= do_constant_folding(op
->opc
, ctx
->type
, t1
, t2
);
870 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t1
);
875 static bool fold_commutative(OptContext
*ctx
, TCGOp
*op
)
877 swap_commutative(op
->args
[0], &op
->args
[1], &op
->args
[2]);
881 static bool fold_const2_commutative(OptContext
*ctx
, TCGOp
*op
)
883 swap_commutative(op
->args
[0], &op
->args
[1], &op
->args
[2]);
884 return fold_const2(ctx
, op
);
887 static bool fold_masks(OptContext
*ctx
, TCGOp
*op
)
889 uint64_t a_mask
= ctx
->a_mask
;
890 uint64_t z_mask
= ctx
->z_mask
;
891 uint64_t s_mask
= ctx
->s_mask
;
894 * 32-bit ops generate 32-bit results, which for the purpose of
895 * simplifying tcg are sign-extended. Certainly that's how we
896 * represent our constants elsewhere. Note that the bits will
897 * be reset properly for a 64-bit value when encountering the
898 * type changing opcodes.
900 if (ctx
->type
== TCG_TYPE_I32
) {
901 a_mask
= (int32_t)a_mask
;
902 z_mask
= (int32_t)z_mask
;
903 s_mask
|= MAKE_64BIT_MASK(32, 32);
904 ctx
->z_mask
= z_mask
;
905 ctx
->s_mask
= s_mask
;
909 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], 0);
912 return tcg_opt_gen_mov(ctx
, op
, op
->args
[0], op
->args
[1]);
918 * Convert @op to NOT, if NOT is supported by the host.
919 * Return true f the conversion is successful, which will still
920 * indicate that the processing is complete.
922 static bool fold_not(OptContext
*ctx
, TCGOp
*op
);
923 static bool fold_to_not(OptContext
*ctx
, TCGOp
*op
, int idx
)
930 not_op
= INDEX_op_not_i32
;
931 have_not
= TCG_TARGET_HAS_not_i32
;
934 not_op
= INDEX_op_not_i64
;
935 have_not
= TCG_TARGET_HAS_not_i64
;
940 not_op
= INDEX_op_not_vec
;
941 have_not
= TCG_TARGET_HAS_not_vec
;
944 g_assert_not_reached();
948 op
->args
[1] = op
->args
[idx
];
949 return fold_not(ctx
, op
);
954 /* If the binary operation has first argument @i, fold to @i. */
955 static bool fold_ix_to_i(OptContext
*ctx
, TCGOp
*op
, uint64_t i
)
957 if (arg_is_const(op
->args
[1]) && arg_info(op
->args
[1])->val
== i
) {
958 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], i
);
963 /* If the binary operation has first argument @i, fold to NOT. */
964 static bool fold_ix_to_not(OptContext
*ctx
, TCGOp
*op
, uint64_t i
)
966 if (arg_is_const(op
->args
[1]) && arg_info(op
->args
[1])->val
== i
) {
967 return fold_to_not(ctx
, op
, 2);
972 /* If the binary operation has second argument @i, fold to @i. */
973 static bool fold_xi_to_i(OptContext
*ctx
, TCGOp
*op
, uint64_t i
)
975 if (arg_is_const(op
->args
[2]) && arg_info(op
->args
[2])->val
== i
) {
976 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], i
);
981 /* If the binary operation has second argument @i, fold to identity. */
982 static bool fold_xi_to_x(OptContext
*ctx
, TCGOp
*op
, uint64_t i
)
984 if (arg_is_const(op
->args
[2]) && arg_info(op
->args
[2])->val
== i
) {
985 return tcg_opt_gen_mov(ctx
, op
, op
->args
[0], op
->args
[1]);
990 /* If the binary operation has second argument @i, fold to NOT. */
991 static bool fold_xi_to_not(OptContext
*ctx
, TCGOp
*op
, uint64_t i
)
993 if (arg_is_const(op
->args
[2]) && arg_info(op
->args
[2])->val
== i
) {
994 return fold_to_not(ctx
, op
, 1);
999 /* If the binary operation has both arguments equal, fold to @i. */
1000 static bool fold_xx_to_i(OptContext
*ctx
, TCGOp
*op
, uint64_t i
)
1002 if (args_are_copies(op
->args
[1], op
->args
[2])) {
1003 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], i
);
1008 /* If the binary operation has both arguments equal, fold to identity. */
1009 static bool fold_xx_to_x(OptContext
*ctx
, TCGOp
*op
)
1011 if (args_are_copies(op
->args
[1], op
->args
[2])) {
1012 return tcg_opt_gen_mov(ctx
, op
, op
->args
[0], op
->args
[1]);
1018 * These outermost fold_<op> functions are sorted alphabetically.
1020 * The ordering of the transformations should be:
1021 * 1) those that produce a constant
1022 * 2) those that produce a copy
1023 * 3) those that produce information about the result value.
1026 static bool fold_add(OptContext
*ctx
, TCGOp
*op
)
1028 if (fold_const2_commutative(ctx
, op
) ||
1029 fold_xi_to_x(ctx
, op
, 0)) {
1035 /* We cannot as yet do_constant_folding with vectors. */
1036 static bool fold_add_vec(OptContext
*ctx
, TCGOp
*op
)
1038 if (fold_commutative(ctx
, op
) ||
1039 fold_xi_to_x(ctx
, op
, 0)) {
1045 static bool fold_addsub2(OptContext
*ctx
, TCGOp
*op
, bool add
)
1047 bool a_const
= arg_is_const(op
->args
[2]) && arg_is_const(op
->args
[3]);
1048 bool b_const
= arg_is_const(op
->args
[4]) && arg_is_const(op
->args
[5]);
1050 if (a_const
&& b_const
) {
1051 uint64_t al
= arg_info(op
->args
[2])->val
;
1052 uint64_t ah
= arg_info(op
->args
[3])->val
;
1053 uint64_t bl
= arg_info(op
->args
[4])->val
;
1054 uint64_t bh
= arg_info(op
->args
[5])->val
;
1058 if (ctx
->type
== TCG_TYPE_I32
) {
1059 uint64_t a
= deposit64(al
, 32, 32, ah
);
1060 uint64_t b
= deposit64(bl
, 32, 32, bh
);
1068 al
= sextract64(a
, 0, 32);
1069 ah
= sextract64(a
, 32, 32);
1071 Int128 a
= int128_make128(al
, ah
);
1072 Int128 b
= int128_make128(bl
, bh
);
1075 a
= int128_add(a
, b
);
1077 a
= int128_sub(a
, b
);
1080 al
= int128_getlo(a
);
1081 ah
= int128_gethi(a
);
1087 /* The proper opcode is supplied by tcg_opt_gen_mov. */
1088 op2
= tcg_op_insert_before(ctx
->tcg
, op
, 0, 2);
1090 tcg_opt_gen_movi(ctx
, op
, rl
, al
);
1091 tcg_opt_gen_movi(ctx
, op2
, rh
, ah
);
1095 /* Fold sub2 r,x,i to add2 r,x,-i */
1096 if (!add
&& b_const
) {
1097 uint64_t bl
= arg_info(op
->args
[4])->val
;
1098 uint64_t bh
= arg_info(op
->args
[5])->val
;
1100 /* Negate the two parts without assembling and disassembling. */
1104 op
->opc
= (ctx
->type
== TCG_TYPE_I32
1105 ? INDEX_op_add2_i32
: INDEX_op_add2_i64
);
1106 op
->args
[4] = arg_new_constant(ctx
, bl
);
1107 op
->args
[5] = arg_new_constant(ctx
, bh
);
1112 static bool fold_add2(OptContext
*ctx
, TCGOp
*op
)
1114 /* Note that the high and low parts may be independently swapped. */
1115 swap_commutative(op
->args
[0], &op
->args
[2], &op
->args
[4]);
1116 swap_commutative(op
->args
[1], &op
->args
[3], &op
->args
[5]);
1118 return fold_addsub2(ctx
, op
, true);
1121 static bool fold_and(OptContext
*ctx
, TCGOp
*op
)
1125 if (fold_const2_commutative(ctx
, op
) ||
1126 fold_xi_to_i(ctx
, op
, 0) ||
1127 fold_xi_to_x(ctx
, op
, -1) ||
1128 fold_xx_to_x(ctx
, op
)) {
1132 z1
= arg_info(op
->args
[1])->z_mask
;
1133 z2
= arg_info(op
->args
[2])->z_mask
;
1134 ctx
->z_mask
= z1
& z2
;
1137 * Sign repetitions are perforce all identical, whether they are 1 or 0.
1138 * Bitwise operations preserve the relative quantity of the repetitions.
1140 ctx
->s_mask
= arg_info(op
->args
[1])->s_mask
1141 & arg_info(op
->args
[2])->s_mask
;
1144 * Known-zeros does not imply known-ones. Therefore unless
1145 * arg2 is constant, we can't infer affected bits from it.
1147 if (arg_is_const(op
->args
[2])) {
1148 ctx
->a_mask
= z1
& ~z2
;
1151 return fold_masks(ctx
, op
);
1154 static bool fold_andc(OptContext
*ctx
, TCGOp
*op
)
1158 if (fold_const2(ctx
, op
) ||
1159 fold_xx_to_i(ctx
, op
, 0) ||
1160 fold_xi_to_x(ctx
, op
, 0) ||
1161 fold_ix_to_not(ctx
, op
, -1)) {
1165 z1
= arg_info(op
->args
[1])->z_mask
;
1168 * Known-zeros does not imply known-ones. Therefore unless
1169 * arg2 is constant, we can't infer anything from it.
1171 if (arg_is_const(op
->args
[2])) {
1172 uint64_t z2
= ~arg_info(op
->args
[2])->z_mask
;
1173 ctx
->a_mask
= z1
& ~z2
;
1178 ctx
->s_mask
= arg_info(op
->args
[1])->s_mask
1179 & arg_info(op
->args
[2])->s_mask
;
1180 return fold_masks(ctx
, op
);
1183 static bool fold_brcond(OptContext
*ctx
, TCGOp
*op
)
1185 TCGCond cond
= op
->args
[2];
1188 if (swap_commutative(NO_DEST
, &op
->args
[0], &op
->args
[1])) {
1189 op
->args
[2] = cond
= tcg_swap_cond(cond
);
1192 i
= do_constant_folding_cond(ctx
->type
, op
->args
[0], op
->args
[1], cond
);
1194 tcg_op_remove(ctx
->tcg
, op
);
1198 op
->opc
= INDEX_op_br
;
1199 op
->args
[0] = op
->args
[3];
1204 static bool fold_brcond2(OptContext
*ctx
, TCGOp
*op
)
1206 TCGCond cond
= op
->args
[4];
1207 TCGArg label
= op
->args
[5];
1210 if (swap_commutative2(&op
->args
[0], &op
->args
[2])) {
1211 op
->args
[4] = cond
= tcg_swap_cond(cond
);
1214 i
= do_constant_folding_cond2(&op
->args
[0], &op
->args
[2], cond
);
1216 goto do_brcond_const
;
1223 * Simplify LT/GE comparisons vs zero to a single compare
1224 * vs the high word of the input.
1226 if (arg_is_const(op
->args
[2]) && arg_info(op
->args
[2])->val
== 0 &&
1227 arg_is_const(op
->args
[3]) && arg_info(op
->args
[3])->val
== 0) {
1228 goto do_brcond_high
;
1237 * Simplify EQ/NE comparisons where one of the pairs
1238 * can be simplified.
1240 i
= do_constant_folding_cond(TCG_TYPE_I32
, op
->args
[0],
1244 goto do_brcond_const
;
1246 goto do_brcond_high
;
1249 i
= do_constant_folding_cond(TCG_TYPE_I32
, op
->args
[1],
1253 goto do_brcond_const
;
1255 op
->opc
= INDEX_op_brcond_i32
;
1256 op
->args
[1] = op
->args
[2];
1258 op
->args
[3] = label
;
1267 op
->opc
= INDEX_op_brcond_i32
;
1268 op
->args
[0] = op
->args
[1];
1269 op
->args
[1] = op
->args
[3];
1271 op
->args
[3] = label
;
1276 tcg_op_remove(ctx
->tcg
, op
);
1279 op
->opc
= INDEX_op_br
;
1280 op
->args
[0] = label
;
1286 static bool fold_bswap(OptContext
*ctx
, TCGOp
*op
)
1288 uint64_t z_mask
, s_mask
, sign
;
1290 if (arg_is_const(op
->args
[1])) {
1291 uint64_t t
= arg_info(op
->args
[1])->val
;
1293 t
= do_constant_folding(op
->opc
, ctx
->type
, t
, op
->args
[2]);
1294 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t
);
1297 z_mask
= arg_info(op
->args
[1])->z_mask
;
1300 case INDEX_op_bswap16_i32
:
1301 case INDEX_op_bswap16_i64
:
1302 z_mask
= bswap16(z_mask
);
1305 case INDEX_op_bswap32_i32
:
1306 case INDEX_op_bswap32_i64
:
1307 z_mask
= bswap32(z_mask
);
1310 case INDEX_op_bswap64_i64
:
1311 z_mask
= bswap64(z_mask
);
1315 g_assert_not_reached();
1317 s_mask
= smask_from_zmask(z_mask
);
1319 switch (op
->args
[2] & (TCG_BSWAP_OZ
| TCG_BSWAP_OS
)) {
1323 /* If the sign bit may be 1, force all the bits above to 1. */
1324 if (z_mask
& sign
) {
1330 /* The high bits are undefined: force all bits above the sign to 1. */
1331 z_mask
|= sign
<< 1;
1335 ctx
->z_mask
= z_mask
;
1336 ctx
->s_mask
= s_mask
;
1338 return fold_masks(ctx
, op
);
1341 static bool fold_call(OptContext
*ctx
, TCGOp
*op
)
1343 TCGContext
*s
= ctx
->tcg
;
1344 int nb_oargs
= TCGOP_CALLO(op
);
1345 int nb_iargs
= TCGOP_CALLI(op
);
1348 init_arguments(ctx
, op
, nb_oargs
+ nb_iargs
);
1349 copy_propagate(ctx
, op
, nb_oargs
, nb_iargs
);
1351 /* If the function reads or writes globals, reset temp data. */
1352 flags
= tcg_call_flags(op
);
1353 if (!(flags
& (TCG_CALL_NO_READ_GLOBALS
| TCG_CALL_NO_WRITE_GLOBALS
))) {
1354 int nb_globals
= s
->nb_globals
;
1356 for (i
= 0; i
< nb_globals
; i
++) {
1357 if (test_bit(i
, ctx
->temps_used
.l
)) {
1358 reset_ts(ctx
, &ctx
->tcg
->temps
[i
]);
1363 /* If the function has side effects, reset mem data. */
1364 if (!(flags
& TCG_CALL_NO_SIDE_EFFECTS
)) {
1365 remove_mem_copy_all(ctx
);
1368 /* Reset temp data for outputs. */
1369 for (i
= 0; i
< nb_oargs
; i
++) {
1370 reset_temp(ctx
, op
->args
[i
]);
1373 /* Stop optimizing MB across calls. */
1374 ctx
->prev_mb
= NULL
;
1378 static bool fold_count_zeros(OptContext
*ctx
, TCGOp
*op
)
1382 if (arg_is_const(op
->args
[1])) {
1383 uint64_t t
= arg_info(op
->args
[1])->val
;
1386 t
= do_constant_folding(op
->opc
, ctx
->type
, t
, 0);
1387 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t
);
1389 return tcg_opt_gen_mov(ctx
, op
, op
->args
[0], op
->args
[2]);
1392 switch (ctx
->type
) {
1400 g_assert_not_reached();
1402 ctx
->z_mask
= arg_info(op
->args
[2])->z_mask
| z_mask
;
1403 ctx
->s_mask
= smask_from_zmask(ctx
->z_mask
);
1407 static bool fold_ctpop(OptContext
*ctx
, TCGOp
*op
)
1409 if (fold_const1(ctx
, op
)) {
1413 switch (ctx
->type
) {
1415 ctx
->z_mask
= 32 | 31;
1418 ctx
->z_mask
= 64 | 63;
1421 g_assert_not_reached();
1423 ctx
->s_mask
= smask_from_zmask(ctx
->z_mask
);
1427 static bool fold_deposit(OptContext
*ctx
, TCGOp
*op
)
1431 if (arg_is_const(op
->args
[1]) && arg_is_const(op
->args
[2])) {
1432 uint64_t t1
= arg_info(op
->args
[1])->val
;
1433 uint64_t t2
= arg_info(op
->args
[2])->val
;
1435 t1
= deposit64(t1
, op
->args
[3], op
->args
[4], t2
);
1436 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t1
);
1439 switch (ctx
->type
) {
1441 and_opc
= INDEX_op_and_i32
;
1444 and_opc
= INDEX_op_and_i64
;
1447 g_assert_not_reached();
1450 /* Inserting a value into zero at offset 0. */
1451 if (arg_is_const(op
->args
[1])
1452 && arg_info(op
->args
[1])->val
== 0
1453 && op
->args
[3] == 0) {
1454 uint64_t mask
= MAKE_64BIT_MASK(0, op
->args
[4]);
1457 op
->args
[1] = op
->args
[2];
1458 op
->args
[2] = arg_new_constant(ctx
, mask
);
1459 ctx
->z_mask
= mask
& arg_info(op
->args
[1])->z_mask
;
1463 /* Inserting zero into a value. */
1464 if (arg_is_const(op
->args
[2])
1465 && arg_info(op
->args
[2])->val
== 0) {
1466 uint64_t mask
= deposit64(-1, op
->args
[3], op
->args
[4], 0);
1469 op
->args
[2] = arg_new_constant(ctx
, mask
);
1470 ctx
->z_mask
= mask
& arg_info(op
->args
[1])->z_mask
;
1474 ctx
->z_mask
= deposit64(arg_info(op
->args
[1])->z_mask
,
1475 op
->args
[3], op
->args
[4],
1476 arg_info(op
->args
[2])->z_mask
);
1480 static bool fold_divide(OptContext
*ctx
, TCGOp
*op
)
1482 if (fold_const2(ctx
, op
) ||
1483 fold_xi_to_x(ctx
, op
, 1)) {
1489 static bool fold_dup(OptContext
*ctx
, TCGOp
*op
)
1491 if (arg_is_const(op
->args
[1])) {
1492 uint64_t t
= arg_info(op
->args
[1])->val
;
1493 t
= dup_const(TCGOP_VECE(op
), t
);
1494 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t
);
1499 static bool fold_dup2(OptContext
*ctx
, TCGOp
*op
)
1501 if (arg_is_const(op
->args
[1]) && arg_is_const(op
->args
[2])) {
1502 uint64_t t
= deposit64(arg_info(op
->args
[1])->val
, 32, 32,
1503 arg_info(op
->args
[2])->val
);
1504 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t
);
1507 if (args_are_copies(op
->args
[1], op
->args
[2])) {
1508 op
->opc
= INDEX_op_dup_vec
;
1509 TCGOP_VECE(op
) = MO_32
;
1514 static bool fold_eqv(OptContext
*ctx
, TCGOp
*op
)
1516 if (fold_const2_commutative(ctx
, op
) ||
1517 fold_xi_to_x(ctx
, op
, -1) ||
1518 fold_xi_to_not(ctx
, op
, 0)) {
1522 ctx
->s_mask
= arg_info(op
->args
[1])->s_mask
1523 & arg_info(op
->args
[2])->s_mask
;
1527 static bool fold_extract(OptContext
*ctx
, TCGOp
*op
)
1529 uint64_t z_mask_old
, z_mask
;
1530 int pos
= op
->args
[2];
1531 int len
= op
->args
[3];
1533 if (arg_is_const(op
->args
[1])) {
1536 t
= arg_info(op
->args
[1])->val
;
1537 t
= extract64(t
, pos
, len
);
1538 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t
);
1541 z_mask_old
= arg_info(op
->args
[1])->z_mask
;
1542 z_mask
= extract64(z_mask_old
, pos
, len
);
1544 ctx
->a_mask
= z_mask_old
^ z_mask
;
1546 ctx
->z_mask
= z_mask
;
1547 ctx
->s_mask
= smask_from_zmask(z_mask
);
1549 return fold_masks(ctx
, op
);
1552 static bool fold_extract2(OptContext
*ctx
, TCGOp
*op
)
1554 if (arg_is_const(op
->args
[1]) && arg_is_const(op
->args
[2])) {
1555 uint64_t v1
= arg_info(op
->args
[1])->val
;
1556 uint64_t v2
= arg_info(op
->args
[2])->val
;
1557 int shr
= op
->args
[3];
1559 if (op
->opc
== INDEX_op_extract2_i64
) {
1563 v1
= (uint32_t)v1
>> shr
;
1564 v2
= (uint64_t)((int32_t)v2
<< (32 - shr
));
1566 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], v1
| v2
);
1571 static bool fold_exts(OptContext
*ctx
, TCGOp
*op
)
1573 uint64_t s_mask_old
, s_mask
, z_mask
, sign
;
1574 bool type_change
= false;
1576 if (fold_const1(ctx
, op
)) {
1580 z_mask
= arg_info(op
->args
[1])->z_mask
;
1581 s_mask
= arg_info(op
->args
[1])->s_mask
;
1582 s_mask_old
= s_mask
;
1585 CASE_OP_32_64(ext8s
):
1587 z_mask
= (uint8_t)z_mask
;
1589 CASE_OP_32_64(ext16s
):
1591 z_mask
= (uint16_t)z_mask
;
1593 case INDEX_op_ext_i32_i64
:
1596 case INDEX_op_ext32s_i64
:
1598 z_mask
= (uint32_t)z_mask
;
1601 g_assert_not_reached();
1604 if (z_mask
& sign
) {
1607 s_mask
|= sign
<< 1;
1609 ctx
->z_mask
= z_mask
;
1610 ctx
->s_mask
= s_mask
;
1612 ctx
->a_mask
= s_mask
& ~s_mask_old
;
1615 return fold_masks(ctx
, op
);
1618 static bool fold_extu(OptContext
*ctx
, TCGOp
*op
)
1620 uint64_t z_mask_old
, z_mask
;
1621 bool type_change
= false;
1623 if (fold_const1(ctx
, op
)) {
1627 z_mask_old
= z_mask
= arg_info(op
->args
[1])->z_mask
;
1630 CASE_OP_32_64(ext8u
):
1631 z_mask
= (uint8_t)z_mask
;
1633 CASE_OP_32_64(ext16u
):
1634 z_mask
= (uint16_t)z_mask
;
1636 case INDEX_op_extrl_i64_i32
:
1637 case INDEX_op_extu_i32_i64
:
1640 case INDEX_op_ext32u_i64
:
1641 z_mask
= (uint32_t)z_mask
;
1643 case INDEX_op_extrh_i64_i32
:
1648 g_assert_not_reached();
1651 ctx
->z_mask
= z_mask
;
1652 ctx
->s_mask
= smask_from_zmask(z_mask
);
1654 ctx
->a_mask
= z_mask_old
^ z_mask
;
1656 return fold_masks(ctx
, op
);
1659 static bool fold_mb(OptContext
*ctx
, TCGOp
*op
)
1661 /* Eliminate duplicate and redundant fence instructions. */
1664 * Merge two barriers of the same type into one,
1665 * or a weaker barrier into a stronger one,
1666 * or two weaker barriers into a stronger one.
1667 * mb X; mb Y => mb X|Y
1668 * mb; strl => mb; st
1669 * ldaq; mb => ld; mb
1670 * ldaq; strl => ld; mb; st
1671 * Other combinations are also merged into a strong
1672 * barrier. This is stricter than specified but for
1673 * the purposes of TCG is better than not optimizing.
1675 ctx
->prev_mb
->args
[0] |= op
->args
[0];
1676 tcg_op_remove(ctx
->tcg
, op
);
1683 static bool fold_mov(OptContext
*ctx
, TCGOp
*op
)
1685 return tcg_opt_gen_mov(ctx
, op
, op
->args
[0], op
->args
[1]);
1688 static bool fold_movcond(OptContext
*ctx
, TCGOp
*op
)
1690 TCGCond cond
= op
->args
[5];
1693 if (swap_commutative(NO_DEST
, &op
->args
[1], &op
->args
[2])) {
1694 op
->args
[5] = cond
= tcg_swap_cond(cond
);
1697 * Canonicalize the "false" input reg to match the destination reg so
1698 * that the tcg backend can implement a "move if true" operation.
1700 if (swap_commutative(op
->args
[0], &op
->args
[4], &op
->args
[3])) {
1701 op
->args
[5] = cond
= tcg_invert_cond(cond
);
1704 i
= do_constant_folding_cond(ctx
->type
, op
->args
[1], op
->args
[2], cond
);
1706 return tcg_opt_gen_mov(ctx
, op
, op
->args
[0], op
->args
[4 - i
]);
1709 ctx
->z_mask
= arg_info(op
->args
[3])->z_mask
1710 | arg_info(op
->args
[4])->z_mask
;
1711 ctx
->s_mask
= arg_info(op
->args
[3])->s_mask
1712 & arg_info(op
->args
[4])->s_mask
;
1714 if (arg_is_const(op
->args
[3]) && arg_is_const(op
->args
[4])) {
1715 uint64_t tv
= arg_info(op
->args
[3])->val
;
1716 uint64_t fv
= arg_info(op
->args
[4])->val
;
1717 TCGOpcode opc
, negopc
= 0;
1719 switch (ctx
->type
) {
1721 opc
= INDEX_op_setcond_i32
;
1722 if (TCG_TARGET_HAS_negsetcond_i32
) {
1723 negopc
= INDEX_op_negsetcond_i32
;
1729 opc
= INDEX_op_setcond_i64
;
1730 if (TCG_TARGET_HAS_negsetcond_i64
) {
1731 negopc
= INDEX_op_negsetcond_i64
;
1735 g_assert_not_reached();
1738 if (tv
== 1 && fv
== 0) {
1741 } else if (fv
== 1 && tv
== 0) {
1743 op
->args
[3] = tcg_invert_cond(cond
);
1744 } else if (negopc
) {
1745 if (tv
== -1 && fv
== 0) {
1748 } else if (fv
== -1 && tv
== 0) {
1750 op
->args
[3] = tcg_invert_cond(cond
);
1757 static bool fold_mul(OptContext
*ctx
, TCGOp
*op
)
1759 if (fold_const2(ctx
, op
) ||
1760 fold_xi_to_i(ctx
, op
, 0) ||
1761 fold_xi_to_x(ctx
, op
, 1)) {
1767 static bool fold_mul_highpart(OptContext
*ctx
, TCGOp
*op
)
1769 if (fold_const2_commutative(ctx
, op
) ||
1770 fold_xi_to_i(ctx
, op
, 0)) {
1776 static bool fold_multiply2(OptContext
*ctx
, TCGOp
*op
)
1778 swap_commutative(op
->args
[0], &op
->args
[2], &op
->args
[3]);
1780 if (arg_is_const(op
->args
[2]) && arg_is_const(op
->args
[3])) {
1781 uint64_t a
= arg_info(op
->args
[2])->val
;
1782 uint64_t b
= arg_info(op
->args
[3])->val
;
1788 case INDEX_op_mulu2_i32
:
1789 l
= (uint64_t)(uint32_t)a
* (uint32_t)b
;
1790 h
= (int32_t)(l
>> 32);
1793 case INDEX_op_muls2_i32
:
1794 l
= (int64_t)(int32_t)a
* (int32_t)b
;
1798 case INDEX_op_mulu2_i64
:
1799 mulu64(&l
, &h
, a
, b
);
1801 case INDEX_op_muls2_i64
:
1802 muls64(&l
, &h
, a
, b
);
1805 g_assert_not_reached();
1811 /* The proper opcode is supplied by tcg_opt_gen_mov. */
1812 op2
= tcg_op_insert_before(ctx
->tcg
, op
, 0, 2);
1814 tcg_opt_gen_movi(ctx
, op
, rl
, l
);
1815 tcg_opt_gen_movi(ctx
, op2
, rh
, h
);
1821 static bool fold_nand(OptContext
*ctx
, TCGOp
*op
)
1823 if (fold_const2_commutative(ctx
, op
) ||
1824 fold_xi_to_not(ctx
, op
, -1)) {
1828 ctx
->s_mask
= arg_info(op
->args
[1])->s_mask
1829 & arg_info(op
->args
[2])->s_mask
;
1833 static bool fold_neg(OptContext
*ctx
, TCGOp
*op
)
1837 if (fold_const1(ctx
, op
)) {
1841 /* Set to 1 all bits to the left of the rightmost. */
1842 z_mask
= arg_info(op
->args
[1])->z_mask
;
1843 ctx
->z_mask
= -(z_mask
& -z_mask
);
1846 * Because of fold_sub_to_neg, we want to always return true,
1847 * via finish_folding.
1849 finish_folding(ctx
, op
);
1853 static bool fold_nor(OptContext
*ctx
, TCGOp
*op
)
1855 if (fold_const2_commutative(ctx
, op
) ||
1856 fold_xi_to_not(ctx
, op
, 0)) {
1860 ctx
->s_mask
= arg_info(op
->args
[1])->s_mask
1861 & arg_info(op
->args
[2])->s_mask
;
1865 static bool fold_not(OptContext
*ctx
, TCGOp
*op
)
1867 if (fold_const1(ctx
, op
)) {
1871 ctx
->s_mask
= arg_info(op
->args
[1])->s_mask
;
1873 /* Because of fold_to_not, we want to always return true, via finish. */
1874 finish_folding(ctx
, op
);
1878 static bool fold_or(OptContext
*ctx
, TCGOp
*op
)
1880 if (fold_const2_commutative(ctx
, op
) ||
1881 fold_xi_to_x(ctx
, op
, 0) ||
1882 fold_xx_to_x(ctx
, op
)) {
1886 ctx
->z_mask
= arg_info(op
->args
[1])->z_mask
1887 | arg_info(op
->args
[2])->z_mask
;
1888 ctx
->s_mask
= arg_info(op
->args
[1])->s_mask
1889 & arg_info(op
->args
[2])->s_mask
;
1890 return fold_masks(ctx
, op
);
1893 static bool fold_orc(OptContext
*ctx
, TCGOp
*op
)
1895 if (fold_const2(ctx
, op
) ||
1896 fold_xx_to_i(ctx
, op
, -1) ||
1897 fold_xi_to_x(ctx
, op
, -1) ||
1898 fold_ix_to_not(ctx
, op
, 0)) {
1902 ctx
->s_mask
= arg_info(op
->args
[1])->s_mask
1903 & arg_info(op
->args
[2])->s_mask
;
1907 static bool fold_qemu_ld(OptContext
*ctx
, TCGOp
*op
)
1909 const TCGOpDef
*def
= &tcg_op_defs
[op
->opc
];
1910 MemOpIdx oi
= op
->args
[def
->nb_oargs
+ def
->nb_iargs
];
1911 MemOp mop
= get_memop(oi
);
1912 int width
= 8 * memop_size(mop
);
1915 ctx
->s_mask
= MAKE_64BIT_MASK(width
, 64 - width
);
1916 if (!(mop
& MO_SIGN
)) {
1917 ctx
->z_mask
= MAKE_64BIT_MASK(0, width
);
1922 /* Opcodes that touch guest memory stop the mb optimization. */
1923 ctx
->prev_mb
= NULL
;
1927 static bool fold_qemu_st(OptContext
*ctx
, TCGOp
*op
)
1929 /* Opcodes that touch guest memory stop the mb optimization. */
1930 ctx
->prev_mb
= NULL
;
1934 static bool fold_remainder(OptContext
*ctx
, TCGOp
*op
)
1936 if (fold_const2(ctx
, op
) ||
1937 fold_xx_to_i(ctx
, op
, 0)) {
1943 static bool fold_setcond(OptContext
*ctx
, TCGOp
*op
)
1945 TCGCond cond
= op
->args
[3];
1948 if (swap_commutative(op
->args
[0], &op
->args
[1], &op
->args
[2])) {
1949 op
->args
[3] = cond
= tcg_swap_cond(cond
);
1952 i
= do_constant_folding_cond(ctx
->type
, op
->args
[1], op
->args
[2], cond
);
1954 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], i
);
1958 ctx
->s_mask
= smask_from_zmask(1);
1962 static bool fold_negsetcond(OptContext
*ctx
, TCGOp
*op
)
1964 TCGCond cond
= op
->args
[3];
1967 if (swap_commutative(op
->args
[0], &op
->args
[1], &op
->args
[2])) {
1968 op
->args
[3] = cond
= tcg_swap_cond(cond
);
1971 i
= do_constant_folding_cond(ctx
->type
, op
->args
[1], op
->args
[2], cond
);
1973 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], -i
);
1976 /* Value is {0,-1} so all bits are repetitions of the sign. */
1982 static bool fold_setcond2(OptContext
*ctx
, TCGOp
*op
)
1984 TCGCond cond
= op
->args
[5];
1987 if (swap_commutative2(&op
->args
[1], &op
->args
[3])) {
1988 op
->args
[5] = cond
= tcg_swap_cond(cond
);
1991 i
= do_constant_folding_cond2(&op
->args
[1], &op
->args
[3], cond
);
1993 goto do_setcond_const
;
2000 * Simplify LT/GE comparisons vs zero to a single compare
2001 * vs the high word of the input.
2003 if (arg_is_const(op
->args
[3]) && arg_info(op
->args
[3])->val
== 0 &&
2004 arg_is_const(op
->args
[4]) && arg_info(op
->args
[4])->val
== 0) {
2005 goto do_setcond_high
;
2014 * Simplify EQ/NE comparisons where one of the pairs
2015 * can be simplified.
2017 i
= do_constant_folding_cond(TCG_TYPE_I32
, op
->args
[1],
2021 goto do_setcond_const
;
2023 goto do_setcond_high
;
2026 i
= do_constant_folding_cond(TCG_TYPE_I32
, op
->args
[2],
2030 goto do_setcond_const
;
2032 op
->args
[2] = op
->args
[3];
2034 op
->opc
= INDEX_op_setcond_i32
;
2043 op
->args
[1] = op
->args
[2];
2044 op
->args
[2] = op
->args
[4];
2046 op
->opc
= INDEX_op_setcond_i32
;
2051 ctx
->s_mask
= smask_from_zmask(1);
2055 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], i
);
2058 static bool fold_sextract(OptContext
*ctx
, TCGOp
*op
)
2060 uint64_t z_mask
, s_mask
, s_mask_old
;
2061 int pos
= op
->args
[2];
2062 int len
= op
->args
[3];
2064 if (arg_is_const(op
->args
[1])) {
2067 t
= arg_info(op
->args
[1])->val
;
2068 t
= sextract64(t
, pos
, len
);
2069 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t
);
2072 z_mask
= arg_info(op
->args
[1])->z_mask
;
2073 z_mask
= sextract64(z_mask
, pos
, len
);
2074 ctx
->z_mask
= z_mask
;
2076 s_mask_old
= arg_info(op
->args
[1])->s_mask
;
2077 s_mask
= sextract64(s_mask_old
, pos
, len
);
2078 s_mask
|= MAKE_64BIT_MASK(len
, 64 - len
);
2079 ctx
->s_mask
= s_mask
;
2082 ctx
->a_mask
= s_mask
& ~s_mask_old
;
2085 return fold_masks(ctx
, op
);
2088 static bool fold_shift(OptContext
*ctx
, TCGOp
*op
)
2090 uint64_t s_mask
, z_mask
, sign
;
2092 if (fold_const2(ctx
, op
) ||
2093 fold_ix_to_i(ctx
, op
, 0) ||
2094 fold_xi_to_x(ctx
, op
, 0)) {
2098 s_mask
= arg_info(op
->args
[1])->s_mask
;
2099 z_mask
= arg_info(op
->args
[1])->z_mask
;
2101 if (arg_is_const(op
->args
[2])) {
2102 int sh
= arg_info(op
->args
[2])->val
;
2104 ctx
->z_mask
= do_constant_folding(op
->opc
, ctx
->type
, z_mask
, sh
);
2106 s_mask
= do_constant_folding(op
->opc
, ctx
->type
, s_mask
, sh
);
2107 ctx
->s_mask
= smask_from_smask(s_mask
);
2109 return fold_masks(ctx
, op
);
2115 * Arithmetic right shift will not reduce the number of
2116 * input sign repetitions.
2118 ctx
->s_mask
= s_mask
;
2122 * If the sign bit is known zero, then logical right shift
2123 * will not reduced the number of input sign repetitions.
2125 sign
= (s_mask
& -s_mask
) >> 1;
2126 if (!(z_mask
& sign
)) {
2127 ctx
->s_mask
= s_mask
;
2137 static bool fold_sub_to_neg(OptContext
*ctx
, TCGOp
*op
)
2142 if (!arg_is_const(op
->args
[1]) || arg_info(op
->args
[1])->val
!= 0) {
2146 switch (ctx
->type
) {
2148 neg_op
= INDEX_op_neg_i32
;
2152 neg_op
= INDEX_op_neg_i64
;
2158 neg_op
= INDEX_op_neg_vec
;
2159 have_neg
= (TCG_TARGET_HAS_neg_vec
&&
2160 tcg_can_emit_vec_op(neg_op
, ctx
->type
, TCGOP_VECE(op
)) > 0);
2163 g_assert_not_reached();
2167 op
->args
[1] = op
->args
[2];
2168 return fold_neg(ctx
, op
);
2173 /* We cannot as yet do_constant_folding with vectors. */
2174 static bool fold_sub_vec(OptContext
*ctx
, TCGOp
*op
)
2176 if (fold_xx_to_i(ctx
, op
, 0) ||
2177 fold_xi_to_x(ctx
, op
, 0) ||
2178 fold_sub_to_neg(ctx
, op
)) {
2184 static bool fold_sub(OptContext
*ctx
, TCGOp
*op
)
2186 if (fold_const2(ctx
, op
) || fold_sub_vec(ctx
, op
)) {
2190 /* Fold sub r,x,i to add r,x,-i */
2191 if (arg_is_const(op
->args
[2])) {
2192 uint64_t val
= arg_info(op
->args
[2])->val
;
2194 op
->opc
= (ctx
->type
== TCG_TYPE_I32
2195 ? INDEX_op_add_i32
: INDEX_op_add_i64
);
2196 op
->args
[2] = arg_new_constant(ctx
, -val
);
2201 static bool fold_sub2(OptContext
*ctx
, TCGOp
*op
)
2203 return fold_addsub2(ctx
, op
, false);
2206 static bool fold_tcg_ld(OptContext
*ctx
, TCGOp
*op
)
2208 /* We can't do any folding with a load, but we can record bits. */
2210 CASE_OP_32_64(ld8s
):
2211 ctx
->s_mask
= MAKE_64BIT_MASK(8, 56);
2213 CASE_OP_32_64(ld8u
):
2214 ctx
->z_mask
= MAKE_64BIT_MASK(0, 8);
2215 ctx
->s_mask
= MAKE_64BIT_MASK(9, 55);
2217 CASE_OP_32_64(ld16s
):
2218 ctx
->s_mask
= MAKE_64BIT_MASK(16, 48);
2220 CASE_OP_32_64(ld16u
):
2221 ctx
->z_mask
= MAKE_64BIT_MASK(0, 16);
2222 ctx
->s_mask
= MAKE_64BIT_MASK(17, 47);
2224 case INDEX_op_ld32s_i64
:
2225 ctx
->s_mask
= MAKE_64BIT_MASK(32, 32);
2227 case INDEX_op_ld32u_i64
:
2228 ctx
->z_mask
= MAKE_64BIT_MASK(0, 32);
2229 ctx
->s_mask
= MAKE_64BIT_MASK(33, 31);
2232 g_assert_not_reached();
2237 static bool fold_tcg_ld_memcopy(OptContext
*ctx
, TCGOp
*op
)
2243 if (op
->args
[1] != tcgv_ptr_arg(tcg_env
)) {
2249 dst
= arg_temp(op
->args
[0]);
2250 src
= find_mem_copy_for(ctx
, type
, ofs
);
2251 if (src
&& src
->base_type
== type
) {
2252 return tcg_opt_gen_mov(ctx
, op
, temp_arg(dst
), temp_arg(src
));
2256 record_mem_copy(ctx
, type
, dst
, ofs
, ofs
+ tcg_type_size(type
) - 1);
2260 static bool fold_tcg_st(OptContext
*ctx
, TCGOp
*op
)
2262 intptr_t ofs
= op
->args
[2];
2265 if (op
->args
[1] != tcgv_ptr_arg(tcg_env
)) {
2266 remove_mem_copy_all(ctx
);
2274 CASE_OP_32_64(st16
):
2277 case INDEX_op_st32_i64
:
2278 case INDEX_op_st_i32
:
2281 case INDEX_op_st_i64
:
2284 case INDEX_op_st_vec
:
2285 lm1
= tcg_type_size(ctx
->type
) - 1;
2288 g_assert_not_reached();
2290 remove_mem_copy_in(ctx
, ofs
, ofs
+ lm1
);
2294 static bool fold_tcg_st_memcopy(OptContext
*ctx
, TCGOp
*op
)
2300 if (op
->args
[1] != tcgv_ptr_arg(tcg_env
)) {
2301 fold_tcg_st(ctx
, op
);
2305 src
= arg_temp(op
->args
[0]);
2310 * Eliminate duplicate stores of a constant.
2311 * This happens frequently when the target ISA zero-extends.
2313 if (ts_is_const(src
)) {
2314 TCGTemp
*prev
= find_mem_copy_for(ctx
, type
, ofs
);
2316 tcg_op_remove(ctx
->tcg
, op
);
2321 last
= ofs
+ tcg_type_size(type
) - 1;
2322 remove_mem_copy_in(ctx
, ofs
, last
);
2323 record_mem_copy(ctx
, type
, src
, ofs
, last
);
2327 static bool fold_xor(OptContext
*ctx
, TCGOp
*op
)
2329 if (fold_const2_commutative(ctx
, op
) ||
2330 fold_xx_to_i(ctx
, op
, 0) ||
2331 fold_xi_to_x(ctx
, op
, 0) ||
2332 fold_xi_to_not(ctx
, op
, -1)) {
2336 ctx
->z_mask
= arg_info(op
->args
[1])->z_mask
2337 | arg_info(op
->args
[2])->z_mask
;
2338 ctx
->s_mask
= arg_info(op
->args
[1])->s_mask
2339 & arg_info(op
->args
[2])->s_mask
;
2340 return fold_masks(ctx
, op
);
2343 /* Propagate constants and copies, fold constant expressions. */
2344 void tcg_optimize(TCGContext
*s
)
2347 TCGOp
*op
, *op_next
;
2348 OptContext ctx
= { .tcg
= s
};
2350 QSIMPLEQ_INIT(&ctx
.mem_free
);
2352 /* Array VALS has an element for each temp.
2353 If this temp holds a constant then its value is kept in VALS' element.
2354 If this temp is a copy of other ones then the other copies are
2355 available through the doubly linked circular list. */
2357 nb_temps
= s
->nb_temps
;
2358 for (i
= 0; i
< nb_temps
; ++i
) {
2359 s
->temps
[i
].state_ptr
= NULL
;
2362 QTAILQ_FOREACH_SAFE(op
, &s
->ops
, link
, op_next
) {
2363 TCGOpcode opc
= op
->opc
;
2364 const TCGOpDef
*def
;
2367 /* Calls are special. */
2368 if (opc
== INDEX_op_call
) {
2369 fold_call(&ctx
, op
);
2373 def
= &tcg_op_defs
[opc
];
2374 init_arguments(&ctx
, op
, def
->nb_oargs
+ def
->nb_iargs
);
2375 copy_propagate(&ctx
, op
, def
->nb_oargs
, def
->nb_iargs
);
2377 /* Pre-compute the type of the operation. */
2378 if (def
->flags
& TCG_OPF_VECTOR
) {
2379 ctx
.type
= TCG_TYPE_V64
+ TCGOP_VECL(op
);
2380 } else if (def
->flags
& TCG_OPF_64BIT
) {
2381 ctx
.type
= TCG_TYPE_I64
;
2383 ctx
.type
= TCG_TYPE_I32
;
2386 /* Assume all bits affected, no bits known zero, no sign reps. */
2392 * Process each opcode.
2393 * Sorted alphabetically by opcode as much as possible.
2397 done
= fold_add(&ctx
, op
);
2399 case INDEX_op_add_vec
:
2400 done
= fold_add_vec(&ctx
, op
);
2402 CASE_OP_32_64(add2
):
2403 done
= fold_add2(&ctx
, op
);
2405 CASE_OP_32_64_VEC(and):
2406 done
= fold_and(&ctx
, op
);
2408 CASE_OP_32_64_VEC(andc
):
2409 done
= fold_andc(&ctx
, op
);
2411 CASE_OP_32_64(brcond
):
2412 done
= fold_brcond(&ctx
, op
);
2414 case INDEX_op_brcond2_i32
:
2415 done
= fold_brcond2(&ctx
, op
);
2417 CASE_OP_32_64(bswap16
):
2418 CASE_OP_32_64(bswap32
):
2419 case INDEX_op_bswap64_i64
:
2420 done
= fold_bswap(&ctx
, op
);
2424 done
= fold_count_zeros(&ctx
, op
);
2426 CASE_OP_32_64(ctpop
):
2427 done
= fold_ctpop(&ctx
, op
);
2429 CASE_OP_32_64(deposit
):
2430 done
= fold_deposit(&ctx
, op
);
2433 CASE_OP_32_64(divu
):
2434 done
= fold_divide(&ctx
, op
);
2436 case INDEX_op_dup_vec
:
2437 done
= fold_dup(&ctx
, op
);
2439 case INDEX_op_dup2_vec
:
2440 done
= fold_dup2(&ctx
, op
);
2442 CASE_OP_32_64_VEC(eqv
):
2443 done
= fold_eqv(&ctx
, op
);
2445 CASE_OP_32_64(extract
):
2446 done
= fold_extract(&ctx
, op
);
2448 CASE_OP_32_64(extract2
):
2449 done
= fold_extract2(&ctx
, op
);
2451 CASE_OP_32_64(ext8s
):
2452 CASE_OP_32_64(ext16s
):
2453 case INDEX_op_ext32s_i64
:
2454 case INDEX_op_ext_i32_i64
:
2455 done
= fold_exts(&ctx
, op
);
2457 CASE_OP_32_64(ext8u
):
2458 CASE_OP_32_64(ext16u
):
2459 case INDEX_op_ext32u_i64
:
2460 case INDEX_op_extu_i32_i64
:
2461 case INDEX_op_extrl_i64_i32
:
2462 case INDEX_op_extrh_i64_i32
:
2463 done
= fold_extu(&ctx
, op
);
2465 CASE_OP_32_64(ld8s
):
2466 CASE_OP_32_64(ld8u
):
2467 CASE_OP_32_64(ld16s
):
2468 CASE_OP_32_64(ld16u
):
2469 case INDEX_op_ld32s_i64
:
2470 case INDEX_op_ld32u_i64
:
2471 done
= fold_tcg_ld(&ctx
, op
);
2473 case INDEX_op_ld_i32
:
2474 case INDEX_op_ld_i64
:
2475 case INDEX_op_ld_vec
:
2476 done
= fold_tcg_ld_memcopy(&ctx
, op
);
2479 CASE_OP_32_64(st16
):
2480 case INDEX_op_st32_i64
:
2481 done
= fold_tcg_st(&ctx
, op
);
2483 case INDEX_op_st_i32
:
2484 case INDEX_op_st_i64
:
2485 case INDEX_op_st_vec
:
2486 done
= fold_tcg_st_memcopy(&ctx
, op
);
2489 done
= fold_mb(&ctx
, op
);
2491 CASE_OP_32_64_VEC(mov
):
2492 done
= fold_mov(&ctx
, op
);
2494 CASE_OP_32_64(movcond
):
2495 done
= fold_movcond(&ctx
, op
);
2498 done
= fold_mul(&ctx
, op
);
2500 CASE_OP_32_64(mulsh
):
2501 CASE_OP_32_64(muluh
):
2502 done
= fold_mul_highpart(&ctx
, op
);
2504 CASE_OP_32_64(muls2
):
2505 CASE_OP_32_64(mulu2
):
2506 done
= fold_multiply2(&ctx
, op
);
2508 CASE_OP_32_64_VEC(nand
):
2509 done
= fold_nand(&ctx
, op
);
2512 done
= fold_neg(&ctx
, op
);
2514 CASE_OP_32_64_VEC(nor
):
2515 done
= fold_nor(&ctx
, op
);
2517 CASE_OP_32_64_VEC(not):
2518 done
= fold_not(&ctx
, op
);
2520 CASE_OP_32_64_VEC(or):
2521 done
= fold_or(&ctx
, op
);
2523 CASE_OP_32_64_VEC(orc
):
2524 done
= fold_orc(&ctx
, op
);
2526 case INDEX_op_qemu_ld_a32_i32
:
2527 case INDEX_op_qemu_ld_a64_i32
:
2528 case INDEX_op_qemu_ld_a32_i64
:
2529 case INDEX_op_qemu_ld_a64_i64
:
2530 case INDEX_op_qemu_ld_a32_i128
:
2531 case INDEX_op_qemu_ld_a64_i128
:
2532 done
= fold_qemu_ld(&ctx
, op
);
2534 case INDEX_op_qemu_st8_a32_i32
:
2535 case INDEX_op_qemu_st8_a64_i32
:
2536 case INDEX_op_qemu_st_a32_i32
:
2537 case INDEX_op_qemu_st_a64_i32
:
2538 case INDEX_op_qemu_st_a32_i64
:
2539 case INDEX_op_qemu_st_a64_i64
:
2540 case INDEX_op_qemu_st_a32_i128
:
2541 case INDEX_op_qemu_st_a64_i128
:
2542 done
= fold_qemu_st(&ctx
, op
);
2545 CASE_OP_32_64(remu
):
2546 done
= fold_remainder(&ctx
, op
);
2548 CASE_OP_32_64(rotl
):
2549 CASE_OP_32_64(rotr
):
2553 done
= fold_shift(&ctx
, op
);
2555 CASE_OP_32_64(setcond
):
2556 done
= fold_setcond(&ctx
, op
);
2558 CASE_OP_32_64(negsetcond
):
2559 done
= fold_negsetcond(&ctx
, op
);
2561 case INDEX_op_setcond2_i32
:
2562 done
= fold_setcond2(&ctx
, op
);
2564 CASE_OP_32_64(sextract
):
2565 done
= fold_sextract(&ctx
, op
);
2568 done
= fold_sub(&ctx
, op
);
2570 case INDEX_op_sub_vec
:
2571 done
= fold_sub_vec(&ctx
, op
);
2573 CASE_OP_32_64(sub2
):
2574 done
= fold_sub2(&ctx
, op
);
2576 CASE_OP_32_64_VEC(xor):
2577 done
= fold_xor(&ctx
, op
);
2584 finish_folding(&ctx
, op
);