/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "exec/cpu-common.h"
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)
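
/*
 * Note: each CASE_OP_32_64(x) below expands to the pair of labels
 * "case INDEX_op_x_i32: case INDEX_op_x_i64:", and CASE_OP_32_64_VEC(x)
 * additionally covers "case INDEX_op_x_vec:", so a single switch arm
 * handles all width/vector variants of an operation at once.
 */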

struct tcg_temp_info {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    tcg_target_ulong val;
    tcg_target_ulong mask;
};
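
/*
 * As used throughout this file: when IS_CONST is set, VAL holds the
 * constant value of the temp.  MASK is a conservative "possibly nonzero
 * bits" mask: a 0 bit in MASK means that bit of the temp is known to be
 * zero.  PREV_COPY/NEXT_COPY link temps known to hold the same value
 * into a circular doubly linked list; a temp that is its own neighbor
 * has no known copies.
 */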

static inline struct tcg_temp_info *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline struct tcg_temp_info *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

/* Reset TEMP's state, possibly removing the temp from the list of copies.  */
static void reset_ts(TCGTemp *ts)
{
    struct tcg_temp_info *ti = ts_info(ts);
    struct tcg_temp_info *pi = ts_info(ti->prev_copy);
    struct tcg_temp_info *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->mask = -1;
}
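
/*
 * The unlink above is ordinary circular doubly-linked-list removal: the
 * neighbors of TS are pointed at each other, then TS is relinked to
 * itself.  E.g. with copies A <-> TS <-> B (circularly), resetting TS
 * leaves A <-> B, and TS's next_copy/prev_copy both point back at TS,
 * which is exactly the "no copies" state that ts_is_copy() tests for.
 */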

static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}

/* Initialize and activate a temporary.  */
static void init_ts_info(struct tcg_temp_info *infos,
                         TCGTempSet *temps_used, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    if (!test_bit(idx, temps_used->l)) {
        struct tcg_temp_info *ti = &infos[idx];

        ts->state_ptr = ti;
        ti->next_copy = ts;
        ti->prev_copy = ts;
        ti->is_const = false;
        ti->mask = -1;
        set_bit(idx, temps_used->l);
    }
}

static void init_arg_info(struct tcg_temp_info *infos,
                          TCGTempSet *temps_used, TCGArg arg)
{
    init_ts_info(infos, temps_used, arg_temp(arg));
}

static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i;

    /* If this is already a global, we can't do better. */
    if (ts->temp_global) {
        return ts;
    }

    /* Search for a global first. */
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (i->temp_global) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!ts->temp_local) {
        for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
            if (i->temp_local) {
                return i;
            }
        }
    }

    /* Failure to find a better representation, return the same temp. */
    return ts;
}
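
/*
 * The preference order here (global, then temp local, then plain temp)
 * reflects lifetime: globals and temp locals keep their values across
 * basic-block boundaries, so canonicalizing on them gives later code the
 * best chance of reusing the copy.  Note the inner loop tests i, the
 * candidate copy, not ts itself, which the guard has already rejected.
 */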

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val)
{
    const TCGOpDef *def;
    TCGOpcode new_op;
    tcg_target_ulong mask;
    struct tcg_temp_info *di = arg_info(dst);

    def = &tcg_op_defs[op->opc];
    if (def->flags & TCG_OPF_VECTOR) {
        new_op = INDEX_op_dupi_vec;
    } else if (def->flags & TCG_OPF_64BIT) {
        new_op = INDEX_op_movi_i64;
    } else {
        new_op = INDEX_op_movi_i32;
    }
    op->opc = new_op;
    /* TCGOP_VECL and TCGOP_VECE remain unchanged.  */
    op->args[0] = dst;
    op->args[1] = val;

    reset_temp(dst);
    di->is_const = true;
    di->val = val;
    mask = val;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
        /* High bits of the destination are now garbage.  */
        mask |= ~0xffffffffull;
    }
    di->mask = mask;
}
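
/*
 * Example of the mask update above: after "movi_i32 t0, 0x12" on a 64-bit
 * host, the low 32 bits of t0 are exactly 0x12 but the high 32 bits of
 * the host register are unspecified, so di->mask becomes
 * 0xffffffff00000012 rather than 0x12.
 */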

static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    const TCGOpDef *def;
    struct tcg_temp_info *di;
    struct tcg_temp_info *si;
    tcg_target_ulong mask;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(s, op);
        return;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);
    def = &tcg_op_defs[op->opc];
    if (def->flags & TCG_OPF_VECTOR) {
        new_op = INDEX_op_mov_vec;
    } else if (def->flags & TCG_OPF_64BIT) {
        new_op = INDEX_op_mov_i64;
    } else {
        new_op = INDEX_op_mov_i32;
    }
    op->opc = new_op;
    /* TCGOP_VECL and TCGOP_VECE remain unchanged.  */
    op->args[0] = dst;
    op->args[1] = src;

    mask = si->mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage.  */
        mask |= ~0xffffffffull;
    }
    di->mask = mask;

    if (src_ts->type == dst_ts->type) {
        struct tcg_temp_info *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
}
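
/*
 * Note that the copy-list splice above is only done when source and
 * destination have the same TCGType; a mov between types still
 * propagates the known-bits mask, but does not record the two temps as
 * interchangeable copies.
 */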

static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64_VEC(add):
        return x + y;

    CASE_OP_32_64_VEC(sub):
        return x - y;

    CASE_OP_32_64_VEC(mul):
        return x * y;

    CASE_OP_32_64_VEC(and):
        return x & y;

    CASE_OP_32_64_VEC(or):
        return x | y;

    CASE_OP_32_64_VEC(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64_VEC(andc):
        return x & ~y;

    CASE_OP_32_64_VEC(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        return bswap16(x);

    CASE_OP_32_64(bswap32):
        return bswap32(x);

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined.  */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);
409 "Unrecognized operation %d in do_constant_folding.\n", op
);
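
/*
 * Two idioms above are worth calling out.  Shift and rotate counts are
 * masked ("y & 31" / "y & 63") so that the fold never performs an
 * out-of-range shift, which would be undefined behavior in C.  The
 * division cases use the GNU C conditional with an omitted middle
 * operand: "(y ? : 1)" evaluates to y if y is nonzero and to 1
 * otherwise, so a constant divide-by-zero (undefined for the guest
 * anyway) cannot trap the host.
 */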

static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    TCGArg res = do_constant_folding_2(op, x, y);
    if (!(def->flags & TCG_OPF_64BIT)) {
        res = (int32_t)res;
    }
    return res;
}
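
/*
 * The (int32_t) cast truncates a 32-bit op's result and sign-extends it
 * into the full TCGArg: e.g. folding "add_i32 0x7fffffff + 1" yields
 * 0xffffffff80000000 on a 64-bit host.  Keeping 32-bit constants in this
 * sign-extended canonical form lets later comparisons of VAL fields
 * compare consistently.
 */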

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

/* Evaluate condition C for x == x, i.e. when both operands are copies.  */
static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_GTU:
    case TCG_COND_LT:
    case TCG_COND_LTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}

/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can.  */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    tcg_target_ulong xv = arg_info(x)->val;
    tcg_target_ulong yv = arg_info(y)->val;
    if (arg_is_const(x) && arg_is_const(y)) {
        const TCGOpDef *def = &tcg_op_defs[op];
        tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
        if (def->flags & TCG_OPF_64BIT) {
            return do_constant_folding_cond_64(xv, yv, c);
        } else {
            return do_constant_folding_cond_32(xv, yv, c);
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && yv == 0) {
        /* Unsigned comparisons against zero have a fixed result.  */
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    }
    return 2;
}

/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can.  */
static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return 2;
}

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
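
/*
 * For example, "add_i32 t0, $5, t1" is rewritten as "add_i32 t0, t1, $5"
 * (sum > 0: the constant moves to the second slot), and
 * "add_i32 t0, t1, t0" becomes "add_i32 t0, t0, t1" (sum == 0 but the
 * destination matches the second input), matching two-operand hosts
 * where "op a, a, b" needs no extra register move.
 */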

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}
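
/*
 * Same idea as swap_commutative, but for double-word operands: p1 and p2
 * each name a (low, high) pair of arguments, and the pair with more
 * constant halves is moved to the second position as a unit.
 */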

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, nb_globals;
    TCGOp *op, *op_next, *prev_mb = NULL;
    struct tcg_temp_info *infos;
    TCGTempSet temps_used;

    /* Array INFOS has an element for each temp.
       If a temp holds a constant then its value is kept in that
       element's VAL field.  If a temp is a copy of other temps then
       the other copies are available through the doubly linked
       circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    bitmap_zero(temps_used.l, nb_temps);
    infos = tcg_malloc(sizeof(struct tcg_temp_info) * nb_temps);

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        tcg_target_ulong mask, partmask, affected;
        int nb_oargs, nb_iargs, i;
        TCGArg tmp;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        /* Count the arguments, and initialize the temps that are
           going to be used. */
        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            for (i = 0; i < nb_oargs + nb_iargs; i++) {
                TCGTemp *ts = arg_temp(op->args[i]);
                if (ts) {
                    init_ts_info(infos, &temps_used, ts);
                }
            }
        } else {
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            for (i = 0; i < nb_oargs + nb_iargs; i++) {
                init_arg_info(infos, &temps_used, op->args[i]);
            }
        }

        /* Do copy propagation */
        for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            if (ts && ts_is_copy(ts)) {
                op->args[i] = temp_arg(find_better_copy(s, ts));
            }
        }

        /* For commutative operations make constant second argument */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
        CASE_OP_32_64_VEC(mul):
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            swap_commutative(op->args[0], &op->args[1], &op->args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &op->args[0], &op->args[1])) {
                op->args[2] = tcg_swap_cond(op->args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
                op->args[3] = tcg_swap_cond(op->args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &op->args[1], &op->args[2])) {
                op->args[5] = tcg_swap_cond(op->args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation.  */
            if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
                op->args[5] = tcg_invert_cond(op->args[5]);
            }
            break;
        CASE_OP_32_64(add2):
            swap_commutative(op->args[0], &op->args[2], &op->args[4]);
            swap_commutative(op->args[1], &op->args[3], &op->args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(op->args[0], &op->args[2], &op->args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&op->args[0], &op->args[2])) {
                op->args[4] = tcg_swap_cond(op->args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&op->args[1], &op->args[3])) {
                op->args[5] = tcg_swap_cond(op->args[5]);
            }
            break;
        default:
            break;
        }

        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and the "sub r, 0, a => neg r, a" case.  */
        switch (opc) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == 0) {
                tcg_opt_gen_movi(s, op, op->args[0], 0);
                continue;
            }
            break;
        CASE_OP_32_64_VEC(sub):
            {
                TCGOpcode neg_op;
                bool have_neg;

                if (arg_is_const(op->args[2])) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (opc == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else if (opc == INDEX_op_sub_i64) {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                } else if (TCG_TARGET_HAS_neg_vec) {
                    TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
                    unsigned vece = TCGOP_VECE(op);
                    neg_op = INDEX_op_neg_vec;
                    have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
                } else {
                    break;
                }
                if (!have_neg) {
                    break;
                }
                if (arg_is_const(op->args[1])
                    && arg_info(op->args[1])->val == 0) {
                    op->opc = neg_op;
                    reset_temp(op->args[0]);
                    op->args[1] = op->args[2];
                    continue;
                }
            }
            break;
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64(nand):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == -1) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(nor):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64_VEC(andc):
            if (!arg_is_const(op->args[2])
                && arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == -1) {
                i = 2;
                goto try_not;
            }
            break;
        CASE_OP_32_64_VEC(orc):
        CASE_OP_32_64(eqv):
            if (!arg_is_const(op->args[2])
                && arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == 0) {
                i = 2;
                goto try_not;
            }
            break;
        try_not:
            {
                TCGOpcode not_op;
                bool have_not;

                if (def->flags & TCG_OPF_VECTOR) {
                    not_op = INDEX_op_not_vec;
                    have_not = TCG_TARGET_HAS_not_vec;
                } else if (def->flags & TCG_OPF_64BIT) {
                    not_op = INDEX_op_not_i64;
                    have_not = TCG_TARGET_HAS_not_i64;
                } else {
                    not_op = INDEX_op_not_i32;
                    have_not = TCG_TARGET_HAS_not_i32;
                }
                if (!have_not) {
                    break;
                }
                op->opc = not_op;
                reset_temp(op->args[0]);
                op->args[1] = op->args[i];
                continue;
            }
        default:
            break;
        }

        /* Simplify expression for "op r, a, const => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
        CASE_OP_32_64_VEC(sub):
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64_VEC(andc):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(orc):
        CASE_OP_32_64(eqv):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == -1) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify using known-zero bits.  Currently only ops with a single
           output argument are supported. */
        mask = -1;
        affected = -1;
        switch (opc) {
        CASE_OP_32_64(ext8s):
            if ((arg_info(op->args[1])->mask & 0x80) != 0) {
                break;
            }
        CASE_OP_32_64(ext8u):
            mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
                break;
            }
        CASE_OP_32_64(ext16u):
            mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_ext32u_i64:
            mask = 0xffffffffU;
            goto and_const;

        CASE_OP_32_64(and):
            mask = arg_info(op->args[2])->mask;
            if (arg_is_const(op->args[2])) {
        and_const:
                affected = arg_info(op->args[1])->mask & ~mask;
            }
            mask = arg_info(op->args[1])->mask & mask;
            break;

        case INDEX_op_ext_i32_i64:
            if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_extu_i32_i64:
            /* We do not compute affected as it is a size changing op.  */
            mask = (uint32_t)arg_info(op->args[1])->mask;
            break;

        CASE_OP_32_64(andc):
            /* Known-zeros do not imply known-ones.  Therefore unless
               op->args[2] is constant, we can't infer anything from it.  */
            if (arg_is_const(op->args[2])) {
                mask = ~arg_info(op->args[2])->mask;
                goto and_const;
            }
            /* But we certainly know nothing outside args[1] may be set. */
            mask = arg_info(op->args[1])->mask;
            break;

        case INDEX_op_sar_i32:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 31;
                mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;
        case INDEX_op_sar_i64:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 63;
                mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;

        case INDEX_op_shr_i32:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 31;
                mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;
        case INDEX_op_shr_i64:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 63;
                mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
            }
            break;

        case INDEX_op_extrl_i64_i32:
            mask = (uint32_t)arg_info(op->args[1])->mask;
            break;
        case INDEX_op_extrh_i64_i32:
            mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
            break;

        CASE_OP_32_64(shl):
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
                mask = arg_info(op->args[1])->mask << tmp;
            }
            break;

        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost.  */
            mask = -(arg_info(op->args[1])->mask
                     & -arg_info(op->args[1])->mask);
            break;
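            /* Worked example: if args[1]'s mask is 0b0110 (only bits 1-2
               possibly set), then m & -m = 0b0010 isolates the rightmost
               possibly-set bit, and -(0b0010) = 0b...1110: the negated
               value may set any bit at or above that position, but bit 0
               stays known-zero. */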

        CASE_OP_32_64(deposit):
            mask = deposit64(arg_info(op->args[1])->mask,
                             op->args[3], op->args[4],
                             arg_info(op->args[2])->mask);
            break;

        CASE_OP_32_64(extract):
            mask = extract64(arg_info(op->args[1])->mask,
                             op->args[2], op->args[3]);
            if (op->args[2] == 0) {
                affected = arg_info(op->args[1])->mask & ~mask;
            }
            break;
        CASE_OP_32_64(sextract):
            mask = sextract64(arg_info(op->args[1])->mask,
                              op->args[2], op->args[3]);
            if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
                affected = arg_info(op->args[1])->mask & ~mask;
            }
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
            break;

        case INDEX_op_clz_i32:
        case INDEX_op_ctz_i32:
            mask = arg_info(op->args[2])->mask | 31;
            break;

        case INDEX_op_clz_i64:
        case INDEX_op_ctz_i64:
            mask = arg_info(op->args[2])->mask | 63;
            break;

        case INDEX_op_ctpop_i32:
            mask = 32 | 31;
            break;
        case INDEX_op_ctpop_i64:
            mask = 64 | 63;
            break;

        CASE_OP_32_64(setcond):
        case INDEX_op_setcond2_i32:
            mask = 1;
            break;

        CASE_OP_32_64(movcond):
            mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
            break;

        CASE_OP_32_64(ld8u):
            mask = 0xff;
            break;
        CASE_OP_32_64(ld16u):
            mask = 0xffff;
            break;
        case INDEX_op_ld32u_i64:
            mask = 0xffffffffu;
            break;

        CASE_OP_32_64(qemu_ld):
            {
                TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs];
                TCGMemOp mop = get_memop(oi);
                if (!(mop & MO_SIGN)) {
                    mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
                }
            }
            break;
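            /* E.g. for a byte load (MO_SIZE == 0): 8 << 0 == 8 bits, so
               mask = (2ULL << 7) - 1 = 0xff; a 32-bit load gives
               (2ULL << 31) - 1 = 0xffffffff.  The 2ULL << (n - 1) form
               avoids an undefined shift by 64 for an 8-byte load. */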

        default:
            break;
        }

        /* 32-bit ops generate 32-bit results.  For the result-is-zero test
           below, we can ignore high bits, but for further optimizations we
           need to record that the high bits contain garbage.  */
        partmask = mask;
        if (!(def->flags & TCG_OPF_64BIT)) {
            mask |= ~(tcg_target_ulong)0xffffffffu;
            partmask &= 0xffffffffu;
            affected &= 0xffffffffu;
        }

        if (partmask == 0) {
            tcg_debug_assert(nb_oargs == 1);
            tcg_opt_gen_movi(s, op, op->args[0], 0);
            continue;
        }
        if (affected == 0) {
            tcg_debug_assert(nb_oargs == 1);
            tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
            continue;
        }

        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(mul):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            if (arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                tcg_opt_gen_movi(s, op, op->args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(and):
            if (args_are_copies(op->args[1], op->args[2])) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => movi r, 0" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(andc):
        CASE_OP_32_64_VEC(sub):
        CASE_OP_32_64_VEC(xor):
            if (args_are_copies(op->args[1], op->args[2])) {
                tcg_opt_gen_movi(s, op, op->args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }

        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted to arguments by the
           register allocator where needed and possible.  Also detect
           copies. */
        switch (opc) {
        CASE_OP_32_64_VEC(mov):
            tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
            break;
        CASE_OP_32_64(movi):
        case INDEX_op_dupi_vec:
            tcg_opt_gen_movi(s, op, op->args[0], op->args[1]);
            break;

        case INDEX_op_dup_vec:
            if (arg_is_const(op->args[1])) {
                tmp = arg_info(op->args[1])->val;
                tmp = dup_const(TCGOP_VECE(op), tmp);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        CASE_OP_32_64(ctpop):
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
        case INDEX_op_ext_i32_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            if (arg_is_const(op->args[1])) {
                tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
                                          arg_info(op->args[2])->val);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            if (arg_is_const(op->args[1])) {
                TCGArg v = arg_info(op->args[1])->val;
                if (v != 0) {
                    tmp = do_constant_folding(opc, v, 0);
                    tcg_opt_gen_movi(s, op, op->args[0], tmp);
                } else {
                    tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
                }
                break;
            }
            goto do_default;

        CASE_OP_32_64(deposit):
            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                tmp = deposit64(arg_info(op->args[1])->val,
                                op->args[3], op->args[4],
                                arg_info(op->args[2])->val);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(extract):
            if (arg_is_const(op->args[1])) {
                tmp = extract64(arg_info(op->args[1])->val,
                                op->args[2], op->args[3]);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(sextract):
            if (arg_is_const(op->args[1])) {
                tmp = sextract64(arg_info(op->args[1])->val,
                                 op->args[2], op->args[3]);
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(extract2):
            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
                TCGArg v1 = arg_info(op->args[1])->val;
                TCGArg v2 = arg_info(op->args[2])->val;

                if (opc == INDEX_op_extract2_i64) {
                    tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3]));
                } else {
                    /* Mask to 32 bits so sign-extended high bits of v1
                       cannot leak into the result before truncation.  */
                    tmp = (int32_t)(((uint32_t)v1 >> op->args[3]) |
                                    ((uint32_t)v2 << (32 - op->args[3])));
                }
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(opc, op->args[1],
                                           op->args[2], op->args[3]);
            if (tmp != 2) {
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(opc, op->args[0],
                                           op->args[1], op->args[2]);
            if (tmp != 2) {
                if (tmp) {
                    bitmap_zero(temps_used.l, nb_temps);
                    op->opc = INDEX_op_br;
                    op->args[0] = op->args[3];
                } else {
                    tcg_op_remove(s, op);
                }
                break;
            }
            goto do_default;

        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(opc, op->args[1],
                                           op->args[2], op->args[5]);
            if (tmp != 2) {
                tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
                break;
            }
            if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
                tcg_target_ulong tv = arg_info(op->args[3])->val;
                tcg_target_ulong fv = arg_info(op->args[4])->val;
                TCGCond cond = op->args[5];
                if (fv == 1 && tv == 0) {
                    cond = tcg_invert_cond(cond);
                } else if (!(tv == 1 && fv == 0)) {
                    goto do_default;
                }
                op->args[3] = cond;
                op->opc = opc = (opc == INDEX_op_movcond_i32
                                 ? INDEX_op_setcond_i32
                                 : INDEX_op_setcond_i64);
                nb_iargs = 2;
            }
            goto do_default;
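        /* In effect, a movcond whose arms are the constants 1 and 0 is
           just the predicate itself: "movcond r, c1, c2, $1, $0, cond"
           becomes "setcond r, c1, c2, cond", and with the arms reversed
           the condition is inverted first. */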

        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
                && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
                uint32_t al = arg_info(op->args[2])->val;
                uint32_t ah = arg_info(op->args[3])->val;
                uint32_t bl = arg_info(op->args[4])->val;
                uint32_t bh = arg_info(op->args[5])->val;
                uint64_t a = ((uint64_t)ah << 32) | al;
                uint64_t b = ((uint64_t)bh << 32) | bl;
                TCGArg rl, rh;
                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32);

                if (opc == INDEX_op_add2_i32) {
                    a += b;
                } else {
                    a -= b;
                }

                rl = op->args[0];
                rh = op->args[1];
                tcg_opt_gen_movi(s, op, rl, (int32_t)a);
                tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32));
                break;
            }
            goto do_default;
:
1300 if (arg_is_const(op
->args
[2]) && arg_is_const(op
->args
[3])) {
1301 uint32_t a
= arg_info(op
->args
[2])->val
;
1302 uint32_t b
= arg_info(op
->args
[3])->val
;
1303 uint64_t r
= (uint64_t)a
* b
;
1305 TCGOp
*op2
= tcg_op_insert_before(s
, op
, INDEX_op_movi_i32
);
1309 tcg_opt_gen_movi(s
, op
, rl
, (int32_t)r
);
1310 tcg_opt_gen_movi(s
, op2
, rh
, (int32_t)(r
>> 32));
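        /* Both double-word cases fold to two 32-bit movi ops: the
           original op is rewritten to set the low word, and a movi_i32
           inserted before it sets the high word.  E.g. a constant
           mulu2_i32 with a = b = 0x10000 becomes movi rl, 0 and
           movi rh, 1. */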

        case INDEX_op_brcond2_i32:
            tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
                                            op->args[4]);
            if (tmp != 2) {
                if (tmp) {
            do_brcond_true:
                    bitmap_zero(temps_used.l, nb_temps);
                    op->opc = INDEX_op_br;
                    op->args[0] = op->args[5];
                } else {
            do_brcond_false:
                    tcg_op_remove(s, op);
                }
            } else if ((op->args[4] == TCG_COND_LT
                        || op->args[4] == TCG_COND_GE)
                       && arg_is_const(op->args[2])
                       && arg_info(op->args[2])->val == 0
                       && arg_is_const(op->args[3])
                       && arg_info(op->args[3])->val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input: the sign of a double-word
                   value lives entirely in its high word.  */
            do_brcond_high:
                bitmap_zero(temps_used.l, nb_temps);
                op->opc = INDEX_op_brcond_i32;
                op->args[0] = op->args[1];
                op->args[1] = op->args[3];
                op->args[2] = op->args[4];
                op->args[3] = op->args[5];
            } else if (op->args[4] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[0], op->args[2],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp == 1) {
                    goto do_brcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_brcond_low:
                bitmap_zero(temps_used.l, nb_temps);
                op->opc = INDEX_op_brcond_i32;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                op->args[3] = op->args[5];
            } else if (op->args[4] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[0], op->args[2],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_high;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_low;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            break;

        case INDEX_op_setcond2_i32:
            tmp = do_constant_folding_cond2(&op->args[1], &op->args[3],
                                            op->args[5]);
            if (tmp != 2) {
            do_setcond_const:
                tcg_opt_gen_movi(s, op, op->args[0], tmp);
            } else if ((op->args[5] == TCG_COND_LT
                        || op->args[5] == TCG_COND_GE)
                       && arg_is_const(op->args[3])
                       && arg_info(op->args[3])->val == 0
                       && arg_is_const(op->args[4])
                       && arg_info(op->args[4])->val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
            do_setcond_high:
                reset_temp(op->args[0]);
                arg_info(op->args[0])->mask = 1;
                op->opc = INDEX_op_setcond_i32;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                op->args[3] = op->args[5];
            } else if (op->args[5] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_const;
                } else if (tmp == 1) {
                    goto do_setcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[2], op->args[4],
                                               TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_setcond_low:
                reset_temp(op->args[0]);
                arg_info(op->args[0])->mask = 1;
                op->opc = INDEX_op_setcond_i32;
                op->args[2] = op->args[3];
                op->args[3] = op->args[5];
            } else if (op->args[5] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[1], op->args[3],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               op->args[2], op->args[4],
                                               TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_low;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            break;

        case INDEX_op_call:
            if (!(op->args[nb_oargs + nb_iargs + 1]
                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
                for (i = 0; i < nb_globals; i++) {
                    if (test_bit(i, temps_used.l)) {
                        reset_ts(&s->temps[i]);
                    }
                }
            }
            goto do_reset_output;

        default:
        do_default:
            /* Default case: we know nothing about the operation (or were
               unable to compute the operation result) so no propagation
               is done.  We trash everything if the operation is the end
               of a basic block, otherwise we only trash the output args.
               "mask" is the non-zero bits mask for the first output arg. */
            if (def->flags & TCG_OPF_BB_END) {
                bitmap_zero(temps_used.l, nb_temps);
            } else {
        do_reset_output:
                for (i = 0; i < nb_oargs; i++) {
                    reset_temp(op->args[i]);
                    /* Save the corresponding known-zero bits mask for the
                       first output argument (only one supported so far). */
                    if (i == 0) {
                        arg_info(op->args[i])->mask = mask;
                    }
                }
            }
            break;
        }

        /* Eliminate duplicate and redundant fence instructions.  */
        if (prev_mb) {
            switch (opc) {
            case INDEX_op_mb:
                /* Merge two barriers of the same type into one,
                 * or a weaker barrier into a stronger one,
                 * or two weaker barriers into a stronger one.
                 *   mb X; mb Y => mb X|Y
                 *   mb; strl => mb; st
                 *   ldaq; mb => ld; mb
                 *   ldaq; strl => ld; mb; st
                 * Other combinations are also merged into a strong
                 * barrier.  This is stricter than specified but for
                 * the purposes of TCG is better than not optimizing.
                 */
                prev_mb->args[0] |= op->args[0];
                tcg_op_remove(s, op);
                break;

            default:
                /* Opcodes that end the block stop the optimization.  */
                if ((def->flags & TCG_OPF_BB_END) == 0) {
                    break;
                }
                /* fallthru */
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st_i64:
            case INDEX_op_call:
                /* Opcodes that touch guest memory stop the optimization.  */
                prev_mb = NULL;
                break;
            }
        } else if (opc == INDEX_op_mb) {
            prev_mb = op;
        }
    }
}