/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "exec/cpu-common.h"
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)
struct tcg_temp_info {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    tcg_target_ulong val;
    tcg_target_ulong mask;
};
static inline struct tcg_temp_info *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline struct tcg_temp_info *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}
/* Reset TEMP's state, possibly removing the temp from the list of copies.  */
static void reset_ts(TCGTemp *ts)
{
    struct tcg_temp_info *ti = ts_info(ts);
    struct tcg_temp_info *pi = ts_info(ti->prev_copy);
    struct tcg_temp_info *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->mask = -1;
}

static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}
/* Initialize and activate a temporary.  */
static void init_ts_info(struct tcg_temp_info *infos,
                         TCGTempSet *temps_used, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    if (!test_bit(idx, temps_used->l)) {
        struct tcg_temp_info *ti = &infos[idx];

        ts->state_ptr = ti;
        ti->next_copy = ts;
        ti->prev_copy = ts;
        ti->is_const = false;
        ti->mask = -1;
        set_bit(idx, temps_used->l);
    }
}
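/*
 * Note: a temp's info is (re)initialized lazily, the first time the temp is
 * seen during the current optimization pass; the temps_used bitmap avoids
 * having to clear the whole infos array up front.
 */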
static void init_arg_info(struct tcg_temp_info *infos,
                          TCGTempSet *temps_used, TCGArg arg)
{
    init_ts_info(infos, temps_used, arg_temp(arg));
}

static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i;

    /* If this is already a global, we can't do better. */
    if (ts->temp_global) {
        return ts;
    }

    /* Search for a global first. */
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (i->temp_global) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!ts->temp_local) {
        for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
            if (i->temp_local) {
                return i;
            }
        }
    }

    /* Failure to find a better representation, return the same temp. */
    return ts;
}
static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}
static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val)
{
    const TCGOpDef *def;
    TCGOpcode new_op;
    tcg_target_ulong mask;
    struct tcg_temp_info *di = arg_info(dst);

    def = &tcg_op_defs[op->opc];
    if (def->flags & TCG_OPF_VECTOR) {
        new_op = INDEX_op_dupi_vec;
    } else if (def->flags & TCG_OPF_64BIT) {
        new_op = INDEX_op_movi_i64;
    } else {
        new_op = INDEX_op_movi_i32;
    }
    op->opc = new_op;
    /* TCGOP_VECL and TCGOP_VECE remain unchanged.  */
    op->args[0] = dst;
    op->args[1] = val;

    reset_temp(dst);
    di->is_const = true;
    di->val = val;
    mask = val;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
        /* High bits of the destination are now garbage.  */
        mask |= ~0xffffffffull;
    }
    di->mask = mask;
}
static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    const TCGOpDef *def;
    struct tcg_temp_info *di;
    struct tcg_temp_info *si;
    tcg_target_ulong mask;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(s, op);
        return;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);
    def = &tcg_op_defs[op->opc];
    if (def->flags & TCG_OPF_VECTOR) {
        new_op = INDEX_op_mov_vec;
    } else if (def->flags & TCG_OPF_64BIT) {
        new_op = INDEX_op_mov_i64;
    } else {
        new_op = INDEX_op_mov_i32;
    }
    op->opc = new_op;
    /* TCGOP_VECL and TCGOP_VECE remain unchanged.  */
    op->args[0] = dst;
    op->args[1] = src;

    mask = si->mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage.  */
        mask |= ~0xffffffffull;
    }
    di->mask = mask;

    if (src_ts->type == dst_ts->type) {
        struct tcg_temp_info *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
}
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    uint64_t l64, h64;

    switch (op) {
    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);
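    /*
     * Note: "y ? : 1" above uses the GNU C conditional-with-omitted-operand
     * extension; it is equivalent to "y ? y : 1" and simply substitutes a
     * divisor of 1 when y is zero so the folding itself cannot trap.
     */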
401 "Unrecognized operation %d in do_constant_folding.\n", op
);
static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    TCGArg res = do_constant_folding_2(op, x, y);
    if (!(def->flags & TCG_OPF_64BIT)) {
        res = (int32_t)res;
    }
    return res;
}
static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    }
    tcg_abort();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    }
    tcg_abort();
}

static bool do_constant_folding_cond_eq(TCGCond c)
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    tcg_target_ulong xv = arg_info(x)->val;
    tcg_target_ulong yv = arg_info(y)->val;
    if (arg_is_const(x) && arg_is_const(y)) {
        const TCGOpDef *def = &tcg_op_defs[op];
        tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
        if (def->flags & TCG_OPF_64BIT) {
            return do_constant_folding_cond_64(xv, yv, c);
        } else {
            return do_constant_folding_cond_32(xv, yv, c);
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && yv == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    }
    return 2;
}
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }
    }
    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return 2;
}
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
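/*
 * Illustrative example: for "add t2, $5, t1" the constant moves to the second
 * operand, giving "add t2, t1, $5"; and when neither operand is constant but
 * the destination equals the second operand, the operands swap so the op has
 * the "op a, a, b" shape that two-address hosts encode more cheaply.
 */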
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, nb_globals;
    TCGOp *op, *op_next, *prev_mb = NULL;
    struct tcg_temp_info *infos;
    TCGTempSet temps_used;

    /* The infos array has an element for each temp.
       If a temp holds a constant then its value is kept in that element.
       If a temp is a copy of other temps then the other copies are
       available through a doubly linked circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    bitmap_zero(temps_used.l, nb_temps);
    infos = tcg_malloc(sizeof(struct tcg_temp_info) * nb_temps);
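    /*
     * Note: after e.g. "mov t3, t1; mov t4, t1", t1, t3 and t4 all sit on one
     * circular copy list, so a later use of t4 can be rewritten by
     * find_better_copy() to use t1 (or a global in the same list) instead.
     */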
    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        tcg_target_ulong mask, partmask, affected;
        int nb_oargs, nb_iargs, i;
        TCGArg tmp;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        /* Count the arguments, and initialize the temps that are
           going to be used */
        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            for (i = 0; i < nb_oargs + nb_iargs; i++) {
                TCGTemp *ts = arg_temp(op->args[i]);
                if (ts) {
                    init_ts_info(infos, &temps_used, ts);
                }
            }
        } else {
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            for (i = 0; i < nb_oargs + nb_iargs; i++) {
                init_arg_info(infos, &temps_used, op->args[i]);
            }
        }

        /* Do copy propagation */
        for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            if (ts && ts_is_copy(ts)) {
                op->args[i] = temp_arg(find_better_copy(s, ts));
            }
        }
        /* For commutative operations make constant second argument */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
        CASE_OP_32_64_VEC(mul):
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            swap_commutative(op->args[0], &op->args[1], &op->args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &op->args[0], &op->args[1])) {
                op->args[2] = tcg_swap_cond(op->args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
                op->args[3] = tcg_swap_cond(op->args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &op->args[1], &op->args[2])) {
                op->args[5] = tcg_swap_cond(op->args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation. */
            if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
                op->args[5] = tcg_invert_cond(op->args[5]);
            }
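            /*
             * Illustrative example: "movcond d, c1, c2, d, v, cond" has its
             * "true" value equal to the destination, so the two value inputs
             * are swapped and the condition inverted, yielding
             * "movcond d, c1, c2, v, d, !cond" with the "false" input in d.
             */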
            break;
        CASE_OP_32_64(add2):
            swap_commutative(op->args[0], &op->args[2], &op->args[4]);
            swap_commutative(op->args[1], &op->args[3], &op->args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(op->args[0], &op->args[2], &op->args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&op->args[0], &op->args[2])) {
                op->args[4] = tcg_swap_cond(op->args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&op->args[1], &op->args[3])) {
                op->args[5] = tcg_swap_cond(op->args[5]);
            }
            break;
        default:
            break;
        }
        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and "sub r, 0, a => neg r, a" case. */
        switch (opc) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == 0) {
                tcg_opt_gen_movi(s, op, op->args[0], 0);
                continue;
            }
            break;
        CASE_OP_32_64_VEC(sub):
            {
                TCGOpcode neg_op;
                bool have_neg;

                if (arg_is_const(op->args[2])) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (opc == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else if (opc == INDEX_op_sub_i64) {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                } else {
                    neg_op = INDEX_op_neg_vec;
                    have_neg = TCG_TARGET_HAS_neg_vec;
                }
                if (!have_neg) {
                    break;
                }
                if (arg_is_const(op->args[1])
                    && arg_info(op->args[1])->val == 0) {
                    op->opc = neg_op;
                    reset_temp(op->args[0]);
                    op->args[1] = op->args[2];
                    continue;
                }
            }
            break;
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64(nand):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == -1) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(nor):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64_VEC(andc):
            if (!arg_is_const(op->args[2])
                && arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == -1) {
                i = 2;
                goto try_not;
            }
            break;
        CASE_OP_32_64_VEC(orc):
            if (!arg_is_const(op->args[2])
                && arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == 0) {
                i = 2;
                goto try_not;
            }
            break;
        try_not:
            {
                TCGOpcode not_op;
                bool have_not;

                if (def->flags & TCG_OPF_VECTOR) {
                    not_op = INDEX_op_not_vec;
                    have_not = TCG_TARGET_HAS_not_vec;
                } else if (def->flags & TCG_OPF_64BIT) {
                    not_op = INDEX_op_not_i64;
                    have_not = TCG_TARGET_HAS_not_i64;
                } else {
                    not_op = INDEX_op_not_i32;
                    have_not = TCG_TARGET_HAS_not_i32;
                }
                if (!have_not) {
                    break;
                }
                op->opc = not_op;
                reset_temp(op->args[0]);
                op->args[1] = op->args[i];
                continue;
            }
        default:
            break;
        }
        /* Simplify expression for "op r, a, const => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
        CASE_OP_32_64_VEC(sub):
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64_VEC(andc):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(orc):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == -1) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify using known-zero bits. Currently only ops with a single
           output argument are supported. */
        mask = -1;
        affected = -1;
        switch (opc) {
        CASE_OP_32_64(ext8s):
            if ((arg_info(op->args[1])->mask & 0x80) != 0) {
                break;
            }
        CASE_OP_32_64(ext8u):
            mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
                break;
            }
        CASE_OP_32_64(ext16u):
            mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_ext32u_i64:
            mask = 0xffffffffU;
            goto and_const;
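        /*
         * Note on the fall-throughs above: when the source's sign bit is
         * already known to be zero, the sign-extending variant behaves like
         * the zero-extending one. For example, ext8s of a value whose
         * known-nonzero mask is 0x7f leaves no bit "affected", so the op is
         * later replaced by a simple mov.
         */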
        CASE_OP_32_64(and):
            mask = arg_info(op->args[2])->mask;
            if (arg_is_const(op->args[2])) {
        and_const:
                affected = arg_info(op->args[1])->mask & ~mask;
            }
            mask = arg_info(op->args[1])->mask & mask;
            break;
        case INDEX_op_ext_i32_i64:
            if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_extu_i32_i64:
            /* We do not compute affected as it is a size changing op. */
            mask = (uint32_t)arg_info(op->args[1])->mask;
            break;
        CASE_OP_32_64(andc):
            /* Known-zeros does not imply known-ones.  Therefore unless
               op->args[2] is constant, we can't infer anything from it. */
            if (arg_is_const(op->args[2])) {
                mask = ~arg_info(op->args[2])->mask;
                goto and_const;
            }
            /* But we certainly know nothing outside args[1] may be set. */
            mask = arg_info(op->args[1])->mask;
            break;
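        /*
         * Illustrative example: "andc r, a, $0xff" clears the low byte, so
         * with args[2] constant the result mask becomes
         * arg_info(args[1])->mask & ~0xff.
         */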
        case INDEX_op_sar_i32:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 31;
                mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;
        case INDEX_op_sar_i64:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 63;
                mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;

        case INDEX_op_shr_i32:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 31;
                mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;
        case INDEX_op_shr_i64:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 63;
                mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;

        case INDEX_op_extrl_i64_i32:
            mask = (uint32_t)arg_info(op->args[1])->mask;
            break;
        case INDEX_op_extrh_i64_i32:
            mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
            break;

        CASE_OP_32_64(shl):
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
                mask = arg_info(op->args[1])->mask << tmp;
            }
            break;
        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost. */
            mask = -(arg_info(op->args[1])->mask
                     & -arg_info(op->args[1])->mask);
            break;
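        /*
         * Illustrative example: if the operand's known-nonzero mask is
         * 0b0010100, its lowest possibly-set bit is 0b0000100, and
         * -(m & -m) = ...1111100, i.e. the two low bits of the negated
         * value are provably zero.
         */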
        CASE_OP_32_64(deposit):
            mask = deposit64(arg_info(op->args[1])->mask,
                             op->args[3], op->args[4],
                             arg_info(op->args[2])->mask);
            break;

        CASE_OP_32_64(extract):
            mask = extract64(arg_info(op->args[1])->mask,
                             op->args[2], op->args[3]);
            if (op->args[2] == 0) {
                affected = arg_info(op->args[1])->mask & ~mask;
            }
            break;
        CASE_OP_32_64(sextract):
            mask = sextract64(arg_info(op->args[1])->mask,
                              op->args[2], op->args[3]);
            if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
                affected = arg_info(op->args[1])->mask & ~mask;
            }
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
            break;

        case INDEX_op_clz_i32:
        case INDEX_op_ctz_i32:
            mask = arg_info(op->args[2])->mask | 31;
            break;

        case INDEX_op_clz_i64:
        case INDEX_op_ctz_i64:
            mask = arg_info(op->args[2])->mask | 63;
            break;

        case INDEX_op_ctpop_i32:
            mask = 32 | 31;
            break;
        case INDEX_op_ctpop_i64:
            mask = 64 | 63;
            break;

        CASE_OP_32_64(setcond):
        case INDEX_op_setcond2_i32:
            mask = 1;
            break;

        CASE_OP_32_64(movcond):
            mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
            break;
        CASE_OP_32_64(ld16u):
            mask = 0xffff;
            break;
        case INDEX_op_ld32u_i64:
            mask = 0xffffffffu;
            break;

        CASE_OP_32_64(qemu_ld):
            {
                TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs];
                TCGMemOp mop = get_memop(oi);
                if (!(mop & MO_SIGN)) {
                    mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
                }
            }
            break;
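        /*
         * For example, an unsigned MO_UB load can only produce the low 8
         * bits, so mask becomes 0xff; sign-extending loads keep the
         * conservative all-ones mask.
         */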
        default:
            break;
        }

        /* 32-bit ops generate 32-bit results.  For the result-is-zero test
           below, we can ignore high bits, but for further optimizations we
           need to record that the high bits contain garbage. */
        partmask = mask;
        if (!(def->flags & TCG_OPF_64BIT)) {
            mask |= ~(tcg_target_ulong)0xffffffffu;
            partmask &= 0xffffffffu;
            affected &= 0xffffffffu;
        }

        if (partmask == 0) {
            tcg_debug_assert(nb_oargs == 1);
            tcg_opt_gen_movi(s, op, op->args[0], 0);
            continue;
        }
        if (affected == 0) {
            tcg_debug_assert(nb_oargs == 1);
            tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
            continue;
        }
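        /*
         * Illustrative examples: "and_i32 r, a, $0" has partmask == 0 and is
         * replaced by "movi r, $0"; "and_i32 r, a, $0xffffffff" leaves no bit
         * of a affected and is replaced by "mov r, a".
         */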
        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(mul):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            if (arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                tcg_opt_gen_movi(s, op, op->args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(and):
            if (args_are_copies(op->args[1], op->args[2])) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => movi r, 0" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(andc):
        CASE_OP_32_64_VEC(sub):
        CASE_OP_32_64_VEC(xor):
            if (args_are_copies(op->args[1], op->args[2])) {
                tcg_opt_gen_movi(s, op, op->args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }
        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted for arguments by the
           register allocator where needed and possible.  Also detect
           copies. */
        switch (opc) {
        CASE_OP_32_64_VEC(mov):
            tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
            break;
        CASE_OP_32_64(movi):
        case INDEX_op_dupi_vec:
            tcg_opt_gen_movi(s, op, op->args[0], op->args[1]);
            break;

        case INDEX_op_dup_vec:
            if (arg_is_const(op->args[1])) {
                tmp = arg_info(op->args[1])->val;
                tmp = dup_const(TCGOP_VECE(op), tmp);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        CASE_OP_32_64(ctpop):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
        case INDEX_op_ext_i32_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            if (arg_is_const(op->args[1])) {
                tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(divu):
        CASE_OP_32_64(remu):
            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
                                          arg_info(op->args[2])->val);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            if (arg_is_const(op->args[1])) {
                TCGArg v = arg_info(op->args[1])->val;
                if (v != 0) {
                    tmp = do_constant_folding(opc, v, 0);
                    tcg_opt_gen_movi(s, op, op->args[0], tmp);
                } else {
                    tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
                }
                break;
            }
            goto do_default;

        CASE_OP_32_64(deposit):
            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                tmp = deposit64(arg_info(op->args[1])->val,
                                op->args[3], op->args[4],
                                arg_info(op->args[2])->val);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;
        CASE_OP_32_64(extract):
            if (arg_is_const(op->args[1])) {
                tmp = extract64(arg_info(op->args[1])->val,
                                op->args[2], op->args[3]);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(sextract):
            if (arg_is_const(op->args[1])) {
                tmp = sextract64(arg_info(op->args[1])->val,
                                 op->args[2], op->args[3]);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(opc, op->args[1],
                                           op->args[2], op->args[3]);
            if (tmp != 2) {
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;
        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(opc, op->args[0],
                                           op->args[1], op->args[2]);
            if (tmp != 2) {
                if (tmp) {
                    bitmap_zero(temps_used.l, nb_temps);
                    op->opc = INDEX_op_br;
                    op->args[0] = op->args[3];
                } else {
                    tcg_op_remove(s, op);
                }
                break;
            }
            goto do_default;

        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(opc, op->args[1],
                                           op->args[2], op->args[5]);
            if (tmp != 2) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
                break;
            }
            if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
                tcg_target_ulong tv = arg_info(op->args[3])->val;
                tcg_target_ulong fv = arg_info(op->args[4])->val;
                TCGCond cond = op->args[5];
                if (fv == 1 && tv == 0) {
                    cond = tcg_invert_cond(cond);
                } else if (!(tv == 1 && fv == 0)) {
                    goto do_default;
                }
                op->args[3] = cond;
                op->opc = opc = (opc == INDEX_op_movcond_i32
                                 ? INDEX_op_setcond_i32
                                 : INDEX_op_setcond_i64);
                nb_iargs = 2;
            }
            goto do_default;
        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
                && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
                uint32_t al = arg_info(op->args[2])->val;
                uint32_t ah = arg_info(op->args[3])->val;
                uint32_t bl = arg_info(op->args[4])->val;
                uint32_t bh = arg_info(op->args[5])->val;
                uint64_t a = ((uint64_t)ah << 32) | al;
                uint64_t b = ((uint64_t)bh << 32) | bl;
                TCGArg rl, rh;
                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32, 2);

                if (opc == INDEX_op_add2_i32) {
                    a += b;
                } else {
                    a -= b;
                }

                rl = op->args[0];
                rh = op->args[1];
                tcg_opt_gen_movi(s, op, rl, (int32_t)a);
                tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32));
                break;
            }
            goto do_default;

        case INDEX_op_mulu2_i32:
            if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
                uint32_t a = arg_info(op->args[2])->val;
                uint32_t b = arg_info(op->args[3])->val;
                uint64_t r = (uint64_t)a * b;
                TCGArg rl, rh;
                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32, 2);

                rl = op->args[0];
                rh = op->args[1];
                tcg_opt_gen_movi(s, op, rl, (int32_t)r);
                tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32));
                break;
            }
            goto do_default;
        case INDEX_op_brcond2_i32:
            tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
                                            op->args[4]);
            if (tmp != 2) {
                if (tmp) {
            do_brcond_true:
                    bitmap_zero(temps_used.l, nb_temps);
                    op->opc = INDEX_op_br;
                    op->args[0] = op->args[5];
                } else {
            do_brcond_false:
                    tcg_op_remove(s, op);
                }
            } else if ((op->args[4] == TCG_COND_LT
                        || op->args[4] == TCG_COND_GE)
                       && arg_is_const(op->args[2])
                       && arg_info(op->args[2])->val == 0
                       && arg_is_const(op->args[3])
                       && arg_info(op->args[3])->val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input. */
            do_brcond_high:
                bitmap_zero(temps_used.l, nb_temps);
                op->opc = INDEX_op_brcond_i32;
                op->args[0] = op->args[1];
                op->args[1] = op->args[3];
                op->args[2] = op->args[4];
                op->args[3] = op->args[5];
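                /*
                 * Illustrative example: "brcond2 al, ah, $0, $0, lt, L"
                 * depends only on the sign of the 64-bit pair, so it becomes
                 * "brcond ah, $0, lt, L".
                 */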
            } else if (op->args[4] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[0], op->args[2],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp == 1) {
                    goto do_brcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_brcond_low:
                bitmap_zero(temps_used.l, nb_temps);
                op->opc = INDEX_op_brcond_i32;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                op->args[3] = op->args[5];
            } else if (op->args[4] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[0], op->args[2],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_high;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_low;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            break;
        case INDEX_op_setcond2_i32:
            tmp = do_constant_folding_cond2(&op->args[1], &op->args[3],
                                            op->args[5]);
            if (tmp != 2) {
            do_setcond_const:
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
            } else if ((op->args[5] == TCG_COND_LT
                        || op->args[5] == TCG_COND_GE)
                       && arg_is_const(op->args[3])
                       && arg_info(op->args[3])->val == 0
                       && arg_is_const(op->args[4])
                       && arg_info(op->args[4])->val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input. */
            do_setcond_high:
                reset_temp(op->args[0]);
                arg_info(op->args[0])->mask = 1;
                op->opc = INDEX_op_setcond_i32;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                op->args[3] = op->args[5];
            } else if (op->args[5] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_const;
                } else if (tmp == 1) {
                    goto do_setcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[2], op->args[4],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_setcond_low:
                reset_temp(op->args[0]);
                arg_info(op->args[0])->mask = 1;
                op->opc = INDEX_op_setcond_i32;
                op->args[2] = op->args[3];
                op->args[3] = op->args[5];
            } else if (op->args[5] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[2], op->args[4],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_low;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            break;
        case INDEX_op_call:
            if (!(op->args[nb_oargs + nb_iargs + 1]
                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
                for (i = 0; i < nb_globals; i++) {
                    if (test_bit(i, temps_used.l)) {
                        reset_ts(&s->temps[i]);
                    }
                }
            }
            goto do_reset_output;

        default:
        do_default:
            /* Default case: we know nothing about operation (or were unable
               to compute the operation result) so no propagation is done.
               We trash everything if the operation is the end of a basic
               block, otherwise we only trash the output args.  "mask" is
               the non-zero bits mask for the first output arg. */
            if (def->flags & TCG_OPF_BB_END) {
                bitmap_zero(temps_used.l, nb_temps);
            } else {
        do_reset_output:
                for (i = 0; i < nb_oargs; i++) {
                    reset_temp(op->args[i]);
                    /* Save the corresponding known-zero bits mask for the
                       first output argument (only one supported so far). */
                    if (i == 0) {
                        arg_info(op->args[i])->mask = mask;
                    }
                }
            }
            break;
        }
        /* Eliminate duplicate and redundant fence instructions. */
        if (prev_mb) {
            switch (opc) {
            case INDEX_op_mb:
                /* Merge two barriers of the same type into one,
                 * or a weaker barrier into a stronger one,
                 * or two weaker barriers into a stronger one.
                 *   mb X; mb Y => mb X|Y
                 *   mb; strl => mb; st
                 *   ldaq; mb => ld; mb
                 *   ldaq; strl => ld; mb; st
                 * Other combinations are also merged into a strong
                 * barrier.  This is stricter than specified but for
                 * the purposes of TCG is better than not optimizing.
                 */
                prev_mb->args[0] |= op->args[0];
                tcg_op_remove(s, op);
                break;
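                /*
                 * For example, "mb(TCG_MO_LD_LD); mb(TCG_MO_ST_ST)" merges
                 * into a single "mb(TCG_MO_LD_LD | TCG_MO_ST_ST)" barrier.
                 */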
            default:
                /* Opcodes that end the block stop the optimization. */
                if ((def->flags & TCG_OPF_BB_END) == 0) {
                    break;
                }
                /* fallthru */
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st_i64:
            case INDEX_op_call:
                /* Opcodes that touch guest memory stop the optimization. */
                prev_mb = NULL;
                break;
            }
        } else if (opc == INDEX_op_mb) {
            prev_mb = op;
        }
    }
}