/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
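/* For example, CASE_OP_32_64(add) expands to
 *     case INDEX_op_add_i32: case INDEX_op_add_i64:
 * so a single case body below covers both operand widths. */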
struct tcg_temp_info {
    bool is_const;
    uint16_t prev_copy;
    uint16_t next_copy;
    tcg_target_ulong val;
    tcg_target_ulong mask;
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];
static TCGTempSet temps_used;
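/* Each temp is a node in a circular doubly-linked list of its known copies:
 * a temp whose next_copy points back to itself has no other copies.  The
 * mask field records which bits may still be non-zero (all ones when
 * nothing is known). */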
static inline bool temp_is_const(TCGArg arg)
{
    return temps[arg].is_const;
}

static inline bool temp_is_copy(TCGArg arg)
{
    return temps[arg].next_copy != arg;
}
/* Reset TEMP's state, possibly removing the temp from the list of copies.  */
static void reset_temp(TCGArg temp)
{
    temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
    temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
    temps[temp].next_copy = temp;
    temps[temp].prev_copy = temp;
    temps[temp].is_const = false;
    temps[temp].mask = -1;
}
/* Reset all temporaries, given that there are NB_TEMPS of them.  */
static void reset_all_temps(int nb_temps)
{
    bitmap_zero(temps_used.l, nb_temps);
}
/* Initialize and activate a temporary.  */
static void init_temp_info(TCGArg temp)
{
    if (!test_bit(temp, temps_used.l)) {
        temps[temp].next_copy = temp;
        temps[temp].prev_copy = temp;
        temps[temp].is_const = false;
        temps[temp].mask = -1;
        set_bit(temp, temps_used.l);
    }
}
static TCGOp *insert_op_before(TCGContext *s, TCGOp *old_op,
                               TCGOpcode opc, int nargs)
{
    int oi = s->gen_next_op_idx;
    int pi = s->gen_next_parm_idx;
    int prev = old_op->prev;
    int next = old_op - s->gen_op_buf;
    TCGOp *new_op;

    tcg_debug_assert(oi < OPC_BUF_SIZE);
    tcg_debug_assert(pi + nargs <= OPPARAM_BUF_SIZE);
    s->gen_next_op_idx = oi + 1;
    s->gen_next_parm_idx = pi + nargs;

    new_op = &s->gen_op_buf[oi];
    *new_op = (TCGOp){
        .opc = opc,
        .args = pi,
        .prev = prev,
        .next = next
    };
    if (prev >= 0) {
        s->gen_op_buf[prev].next = oi;
    } else {
        s->gen_first_op_idx = oi;
    }
    old_op->prev = oi;

    return new_op;
}
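/* tcg_optimize() below uses insert_op_before() to place a second movi_i32
 * ahead of an op whose folded 64-bit result must be split into two 32-bit
 * halves. */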
static int op_bits(TCGOpcode op)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    return def->flags & TCG_OPF_64BIT ? 64 : 32;
}
static TCGOpcode op_to_mov(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
    case 64:
        return INDEX_op_mov_i64;
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGOpcode op_to_movi(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
    case 64:
        return INDEX_op_movi_i64;
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
{
    TCGArg i;

    /* If this is already a global, we can't do better. */
    if (temp < s->nb_globals) {
        return temp;
    }

    /* Search for a global first. */
    for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
        if (i < s->nb_globals) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!s->temps[temp].temp_local) {
        for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
            if (s->temps[i].temp_local) {
                return i;
            }
        }
    }

    /* Failure to find a better representation, return the same temp. */
    return temp;
}
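/* find_better_copy() thus prefers, in order, a global, then a temp local,
 * and only then the original temp, since the first two stay valid across
 * basic-block boundaries. */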
static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
{
    TCGArg i;

    if (arg1 == arg2) {
        return true;
    }

    if (!temp_is_copy(arg1) || !temp_is_copy(arg2)) {
        return false;
    }

    for (i = temps[arg1].next_copy; i != arg1; i = temps[i].next_copy) {
        if (i == arg2) {
            return true;
        }
    }

    return false;
}
static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg *args,
                             TCGArg dst, TCGArg val)
{
    TCGOpcode new_op = op_to_movi(op->opc);
    tcg_target_ulong mask;

    op->opc = new_op;

    reset_temp(dst);
    temps[dst].is_const = true;
    temps[dst].val = val;
    mask = val;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
        /* High bits of the destination are now garbage.  */
        mask |= ~0xffffffffull;
    }
    temps[dst].mask = mask;

    args[0] = dst;
    args[1] = val;
}
static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg *args,
                            TCGArg dst, TCGArg src)
{
    if (temps_are_copies(dst, src)) {
        tcg_op_remove(s, op);
        return;
    }

    TCGOpcode new_op = op_to_mov(op->opc);
    tcg_target_ulong mask;

    op->opc = new_op;

    reset_temp(dst);
    mask = temps[src].mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage.  */
        mask |= ~0xffffffffull;
    }
    temps[dst].mask = mask;

    if (s->temps[src].type == s->temps[dst].type) {
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
        temps[dst].is_const = temps[src].is_const;
        temps[dst].val = temps[src].val;
    }

    args[0] = dst;
    args[1] = src;
}
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    uint64_t l64, h64;

    switch (op) {
    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
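/* Worked example: folding shl_i32 with x = 5 and y = 33 clamps the count to
 * 33 & 31 = 1 and returns 10; shift counts outside the operand width are
 * undefined at the TCG level, so any folded value is acceptable there. */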
static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
    if (op_bits(op) == 32) {
        res = (int32_t)res;
    }
    return res;
}
static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_eq(TCGCond c)
{
    /* With two equal operands, exactly the "or equal" conditions hold. */
    return c == TCG_COND_EQ || c == TCG_COND_GE || c == TCG_COND_GEU
        || c == TCG_COND_LE || c == TCG_COND_LEU;
}
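/* E.g. "setcond r, a, a, leu" folds to 1 via this helper, and a
 * "brcond a, a, ne" can be deleted outright further below. */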
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    if (temp_is_const(x) && temp_is_const(y)) {
        switch (op_bits(op)) {
        case 32:
            return do_constant_folding_cond_32(temps[x].val, temps[y].val, c);
        case 64:
            return do_constant_folding_cond_64(temps[x].val, temps[y].val, c);
        default:
            tcg_abort();
        }
    } else if (temps_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (temp_is_const(y) && temps[y].val == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    }
    return 2;
}
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (temp_is_const(bl) && temp_is_const(bh)) {
        uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val;

        if (temp_is_const(al) && temp_is_const(ah)) {
            uint64_t a;
            a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val;
            return do_constant_folding_cond_64(a, b, c);
        }
    }

    if (temps_are_copies(al, bl) && temps_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return 2;
}
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += temp_is_const(a1);
    sum -= temp_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
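/* With swap_commutative(), "add t2, $5, t1" becomes "add t2, t1, $5", and
 * "add t2, t1, t2" becomes "add t2, t2, t1" so the destination matches the
 * first source operand. */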
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += temp_is_const(p1[0]);
    sum += temp_is_const(p1[1]);
    sum -= temp_is_const(p2[0]);
    sum -= temp_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int oi, oi_next, nb_temps, nb_globals;

    /* The temps[] array has an element for each temp.
       If a temp holds a constant, its value is kept in that element.
       If a temp is a copy of other temps, the other copies are
       reachable through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    reset_all_temps(nb_temps);

    for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
        tcg_target_ulong mask, partmask, affected;
        int nb_oargs, nb_iargs, i;
        TCGArg tmp;

        TCGOp * const op = &s->gen_op_buf[oi];
        TCGArg * const args = &s->gen_opparam_buf[op->args];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        oi_next = op->next;
        /* Count the arguments, and initialize the temps that are
           going to be used. */
        if (opc == INDEX_op_call) {
            nb_oargs = op->callo;
            nb_iargs = op->calli;
            for (i = 0; i < nb_oargs + nb_iargs; i++) {
                tmp = args[i];
                if (tmp != TCG_CALL_DUMMY_ARG) {
                    init_temp_info(tmp);
                }
            }
        } else {
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            for (i = 0; i < nb_oargs + nb_iargs; i++) {
                init_temp_info(args[i]);
            }
        }
        /* Do copy propagation */
        for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
            if (temp_is_copy(args[i])) {
                args[i] = find_better_copy(s, args[i]);
            }
        }
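        /* After this loop each input argument names the preferred copy of
         * its value, so the simplifications below only need to inspect
         * args[] and the per-temp info. */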
        /* For commutative operations make constant second argument */
        switch (opc) {
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            swap_commutative(args[0], &args[1], &args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &args[0], &args[1])) {
                args[2] = tcg_swap_cond(args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(args[0], &args[1], &args[2])) {
                args[3] = tcg_swap_cond(args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &args[1], &args[2])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation.  */
            if (swap_commutative(args[0], &args[4], &args[3])) {
                args[5] = tcg_invert_cond(args[5]);
            }
            break;
        CASE_OP_32_64(add2):
            swap_commutative(args[0], &args[2], &args[4]);
            swap_commutative(args[1], &args[3], &args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(args[0], &args[2], &args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&args[0], &args[2])) {
                args[4] = tcg_swap_cond(args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&args[1], &args[3])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            break;
        default:
            break;
        }
        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and "sub r, 0, a => neg r, a" case.  */
        switch (opc) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (temp_is_const(args[1]) && temps[args[1]].val == 0) {
                tcg_opt_gen_movi(s, op, args, args[0], 0);
                continue;
            }
            break;
        CASE_OP_32_64(sub):
            {
                TCGOpcode neg_op;
                bool have_neg;

                if (temp_is_const(args[2])) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (opc == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                }
                if (!have_neg) {
                    break;
                }
                if (temp_is_const(args[1]) && temps[args[1]].val == 0) {
                    op->opc = neg_op;
                    reset_temp(args[0]);
                    args[1] = args[2];
                    continue;
                }
            }
            break;
        CASE_OP_32_64(xor):
        CASE_OP_32_64(nand):
            if (!temp_is_const(args[1])
                && temp_is_const(args[2]) && temps[args[2]].val == -1) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(nor):
            if (!temp_is_const(args[1])
                && temp_is_const(args[2]) && temps[args[2]].val == 0) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(andc):
            if (!temp_is_const(args[2])
                && temp_is_const(args[1]) && temps[args[1]].val == -1) {
                i = 2;
                goto try_not;
            }
            break;
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
            if (!temp_is_const(args[2])
                && temp_is_const(args[1]) && temps[args[1]].val == 0) {
                i = 2;
                goto try_not;
            }
            break;
        try_not:
            {
                TCGOpcode not_op;
                bool have_not;

                if (def->flags & TCG_OPF_64BIT) {
                    not_op = INDEX_op_not_i64;
                    have_not = TCG_TARGET_HAS_not_i64;
                } else {
                    not_op = INDEX_op_not_i32;
                    have_not = TCG_TARGET_HAS_not_i32;
                }
                if (!have_not) {
                    break;
                }
                op->opc = not_op;
                reset_temp(args[0]);
                args[1] = args[i];
                continue;
            }
        default:
            break;
        }
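        /* E.g. "sub r, 0, a" has now become "neg r, a", and "xor r, a, -1"
         * has become "not r, a", when the target supports those opcodes;
         * everything else falls through to the generic folding below. */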
        /* Simplify expression for "op r, a, const => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(andc):
            if (!temp_is_const(args[1])
                && temp_is_const(args[2]) && temps[args[2]].val == 0) {
                tcg_opt_gen_mov(s, op, args, args[0], args[1]);
                continue;
            }
            break;
        CASE_OP_32_64(and):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
            if (!temp_is_const(args[1])
                && temp_is_const(args[2]) && temps[args[2]].val == -1) {
                tcg_opt_gen_mov(s, op, args, args[0], args[1]);
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify using known-zero bits.  Currently only ops with a single
           output argument are supported. */
        mask = -1;
        affected = -1;
        switch (opc) {
        CASE_OP_32_64(ext8s):
            if ((temps[args[1]].mask & 0x80) != 0) {
                break;
            }
        CASE_OP_32_64(ext8u):
            mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((temps[args[1]].mask & 0x8000) != 0) {
                break;
            }
        CASE_OP_32_64(ext16u):
            mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((temps[args[1]].mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_ext32u_i64:
            mask = 0xffffffffU;
            goto and_const;

        CASE_OP_32_64(and):
            mask = temps[args[2]].mask;
            if (temp_is_const(args[2])) {
        and_const:
                affected = temps[args[1]].mask & ~mask;
            }
            mask = temps[args[1]].mask & mask;
            break;

        case INDEX_op_ext_i32_i64:
            if ((temps[args[1]].mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_extu_i32_i64:
            /* We do not compute affected as it is a size changing op.  */
            mask = (uint32_t)temps[args[1]].mask;
            break;

        CASE_OP_32_64(andc):
            /* Known-zeros does not imply known-ones.  Therefore unless
               args[2] is constant, we can't infer anything from it.  */
            if (temp_is_const(args[2])) {
                mask = ~temps[args[2]].mask;
                goto and_const;
            }
            /* But we certainly know nothing outside args[1] may be set. */
            mask = temps[args[1]].mask;
            break;

        case INDEX_op_sar_i32:
            if (temp_is_const(args[2])) {
                tmp = temps[args[2]].val & 31;
                mask = (int32_t)temps[args[1]].mask >> tmp;
            }
            break;
        case INDEX_op_sar_i64:
            if (temp_is_const(args[2])) {
                tmp = temps[args[2]].val & 63;
                mask = (int64_t)temps[args[1]].mask >> tmp;
            }
            break;

        case INDEX_op_shr_i32:
            if (temp_is_const(args[2])) {
                tmp = temps[args[2]].val & 31;
                mask = (uint32_t)temps[args[1]].mask >> tmp;
            }
            break;
        case INDEX_op_shr_i64:
            if (temp_is_const(args[2])) {
                tmp = temps[args[2]].val & 63;
                mask = (uint64_t)temps[args[1]].mask >> tmp;
            }
            break;

        case INDEX_op_extrl_i64_i32:
            mask = (uint32_t)temps[args[1]].mask;
            break;
        case INDEX_op_extrh_i64_i32:
            mask = (uint64_t)temps[args[1]].mask >> 32;
            break;

        CASE_OP_32_64(shl):
            if (temp_is_const(args[2])) {
                tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
                mask = temps[args[1]].mask << tmp;
            }
            break;

        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost.  */
            mask = -(temps[args[1]].mask & -temps[args[1]].mask);
            break;

        CASE_OP_32_64(deposit):
            mask = deposit64(temps[args[1]].mask, args[3], args[4],
                             temps[args[2]].mask);
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            mask = temps[args[1]].mask | temps[args[2]].mask;
            break;

        CASE_OP_32_64(setcond):
        case INDEX_op_setcond2_i32:
            mask = 1;
            break;

        CASE_OP_32_64(movcond):
            mask = temps[args[3]].mask | temps[args[4]].mask;
            break;

        CASE_OP_32_64(ld8u):
            mask = 0xff;
            break;
        CASE_OP_32_64(ld16u):
            mask = 0xffff;
            break;
        case INDEX_op_ld32u_i64:
            mask = 0xffffffffu;
            break;

        CASE_OP_32_64(qemu_ld):
            {
                TCGMemOpIdx oi = args[nb_oargs + nb_iargs];
                TCGMemOp mop = get_memop(oi);
                if (!(mop & MO_SIGN)) {
                    mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
                }
            }
            break;

        default:
            break;
        }
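        /* Example: for "and r, a, $0xff", mask becomes 0xff and "affected"
         * holds the possibly-set bits of "a" above bit 7; if that is zero
         * the and cannot change anything and degenerates to a mov below. */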
        /* 32-bit ops generate 32-bit results.  For the result-is-zero test
           below, we can ignore the high bits, but for further optimizations
           we need to record that the high bits contain garbage.  */
        partmask = mask;
        if (!(def->flags & TCG_OPF_64BIT)) {
            mask |= ~(tcg_target_ulong)0xffffffffu;
            partmask &= 0xffffffffu;
            affected &= 0xffffffffu;
        }

        if (partmask == 0) {
            assert(nb_oargs == 1);
            tcg_opt_gen_movi(s, op, args, args[0], 0);
            continue;
        }
        if (affected == 0) {
            assert(nb_oargs == 1);
            tcg_opt_gen_mov(s, op, args, args[0], args[1]);
            continue;
        }
        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (opc) {
        CASE_OP_32_64(and):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            if (temp_is_const(args[2]) && temps[args[2]].val == 0) {
                tcg_opt_gen_movi(s, op, args, args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            if (temps_are_copies(args[1], args[2])) {
                tcg_opt_gen_mov(s, op, args, args[0], args[1]);
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => movi r, 0" cases */
        switch (opc) {
        CASE_OP_32_64(andc):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(xor):
            if (temps_are_copies(args[1], args[2])) {
                tcg_opt_gen_movi(s, op, args, args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }
        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted to arguments by register
           allocator where needed and possible.  Also detect copies. */
        switch (opc) {
        CASE_OP_32_64(mov):
            tcg_opt_gen_mov(s, op, args, args[0], args[1]);
            break;
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(s, op, args, args[0], args[1]);
            break;

        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
        case INDEX_op_ext_i32_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            if (temp_is_const(args[1])) {
                tmp = do_constant_folding(opc, temps[args[1]].val, 0);
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
                break;
            }
            goto do_default;
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            if (temp_is_const(args[1]) && temp_is_const(args[2])) {
                tmp = do_constant_folding(opc, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
                break;
            }
            goto do_default;
        CASE_OP_32_64(deposit):
            if (temp_is_const(args[1]) && temp_is_const(args[2])) {
                tmp = deposit64(temps[args[1]].val, args[3], args[4],
                                temps[args[2]].val);
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
                break;
            }
            goto do_default;
        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(opc, args[1], args[2], args[3]);
            if (tmp != 2) {
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
                break;
            }
            goto do_default;
        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(opc, args[0], args[1], args[2]);
            if (tmp != 2) {
                if (tmp) {
                    reset_all_temps(nb_temps);
                    op->opc = INDEX_op_br;
                    args[0] = args[3];
                } else {
                    tcg_op_remove(s, op);
                }
                break;
            }
            goto do_default;
        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(opc, args[1], args[2], args[5]);
            if (tmp != 2) {
                tcg_opt_gen_mov(s, op, args, args[0], args[4-tmp]);
                break;
            }
            goto do_default;
        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            if (temp_is_const(args[2]) && temp_is_const(args[3])
                && temp_is_const(args[4]) && temp_is_const(args[5])) {
                uint32_t al = temps[args[2]].val;
                uint32_t ah = temps[args[3]].val;
                uint32_t bl = temps[args[4]].val;
                uint32_t bh = temps[args[5]].val;
                uint64_t a = ((uint64_t)ah << 32) | al;
                uint64_t b = ((uint64_t)bh << 32) | bl;
                TCGArg rl, rh;
                TCGOp *op2 = insert_op_before(s, op, INDEX_op_movi_i32, 2);
                TCGArg *args2 = &s->gen_opparam_buf[op2->args];

                if (opc == INDEX_op_add2_i32) {
                    a += b;
                } else {
                    a -= b;
                }

                rl = args[0];
                rh = args[1];
                tcg_opt_gen_movi(s, op, args, rl, (int32_t)a);
                tcg_opt_gen_movi(s, op2, args2, rh, (int32_t)(a >> 32));

                /* We've done all we need to do with the movi.  Skip it.  */
                oi_next = op2->next;
                break;
            }
            goto do_default;
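        /* The mulu2_i32 case below follows the same pattern: compute the
         * full 64-bit product on the host, then emit movi_i32 ops for the
         * low and high halves. */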
        case INDEX_op_mulu2_i32:
            if (temp_is_const(args[2]) && temp_is_const(args[3])) {
                uint32_t a = temps[args[2]].val;
                uint32_t b = temps[args[3]].val;
                uint64_t r = (uint64_t)a * b;
                TCGArg rl, rh;
                TCGOp *op2 = insert_op_before(s, op, INDEX_op_movi_i32, 2);
                TCGArg *args2 = &s->gen_opparam_buf[op2->args];

                rl = args[0];
                rh = args[1];
                tcg_opt_gen_movi(s, op, args, rl, (int32_t)r);
                tcg_opt_gen_movi(s, op2, args2, rh, (int32_t)(r >> 32));

                /* We've done all we need to do with the movi.  Skip it.  */
                oi_next = op2->next;
                break;
            }
            goto do_default;
        case INDEX_op_brcond2_i32:
            tmp = do_constant_folding_cond2(&args[0], &args[2], args[4]);
            if (tmp != 2) {
                if (tmp) {
            do_brcond_true:
                    reset_all_temps(nb_temps);
                    op->opc = INDEX_op_br;
                    args[0] = args[5];
                } else {
            do_brcond_false:
                    tcg_op_remove(s, op);
                }
            } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE)
                       && temp_is_const(args[2]) && temps[args[2]].val == 0
                       && temp_is_const(args[3]) && temps[args[3]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
            do_brcond_high:
                reset_all_temps(nb_temps);
                op->opc = INDEX_op_brcond_i32;
                args[0] = args[1];
                args[1] = args[3];
                args[2] = args[4];
                args[3] = args[5];
            } else if (args[4] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[0], args[2], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp == 1) {
                    goto do_brcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[1], args[3], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_brcond_low:
                reset_all_temps(nb_temps);
                op->opc = INDEX_op_brcond_i32;
                args[1] = args[2];
                args[2] = args[4];
                args[3] = args[5];
            } else if (args[4] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[0], args[2], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_high;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[1], args[3], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_low;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            break;

        case INDEX_op_setcond2_i32:
            tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]);
            if (tmp != 2) {
            do_setcond_const:
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
            } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE)
                       && temp_is_const(args[3]) && temps[args[3]].val == 0
                       && temp_is_const(args[4]) && temps[args[4]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
            do_setcond_high:
                reset_temp(args[0]);
                temps[args[0]].mask = 1;
                op->opc = INDEX_op_setcond_i32;
                args[1] = args[2];
                args[2] = args[4];
                args[3] = args[5];
            } else if (args[5] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[1], args[3], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_const;
                } else if (tmp == 1) {
                    goto do_setcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[2], args[4], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_setcond_low:
                reset_temp(args[0]);
                temps[args[0]].mask = 1;
                op->opc = INDEX_op_setcond_i32;
                args[2] = args[3];
                args[3] = args[5];
            } else if (args[5] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified.  */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[1], args[3], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[2], args[4], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_low;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            break;
        case INDEX_op_call:
            if (!(args[nb_oargs + nb_iargs + 1]
                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
                for (i = 0; i < nb_globals; i++) {
                    if (test_bit(i, temps_used.l)) {
                        reset_temp(i);
                    }
                }
            }
            goto do_reset_output;
        default:
        do_default:
            /* Default case: we know nothing about the operation (or were
               unable to compute the operation result) so no propagation
               is done.  We trash everything if the operation is the end of
               a basic block, otherwise we only trash the output args.
               "mask" is the non-zero bits mask for the first output arg.  */
            if (def->flags & TCG_OPF_BB_END) {
                reset_all_temps(nb_temps);
            } else {
        do_reset_output:
                for (i = 0; i < nb_oargs; i++) {
                    reset_temp(args[i]);
                    /* Save the corresponding known-zero bits mask for the
                       first output argument (only one supported so far). */
                    if (i == 0) {
                        temps[args[i]].mask = mask;
                    }
                }
            }
            break;
        }
    }
}