/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "exec/cpu-common.h"
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
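/* For instance, writing "CASE_OP_32_64(add):" expands to
   "case INDEX_op_add_i32: case INDEX_op_add_i64:", so a single switch arm
   covers both the 32-bit and the 64-bit variant of an opcode. */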
struct tcg_temp_info {
    bool is_const;
    uint16_t prev_copy;
    uint16_t next_copy;
    tcg_target_ulong val;
    tcg_target_ulong mask;
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];
static TCGTempSet temps_used;
static inline bool temp_is_const(TCGArg arg)
{
    return temps[arg].is_const;
}
static inline bool temp_is_copy(TCGArg arg)
{
    return temps[arg].next_copy != arg;
}
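/* A temp that is not linked with any other temp points back at itself, so
   next_copy != arg is precisely the "this temp has at least one other known
   copy" test. */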
/* Reset TEMP's state, possibly removing the temp from the list of copies.  */
static void reset_temp(TCGArg temp)
{
    temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
    temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
    temps[temp].next_copy = temp;
    temps[temp].prev_copy = temp;
    temps[temp].is_const = false;
    temps[temp].mask = -1;
}
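/* Note: the first two assignments in reset_temp() splice TEMP out of its
   doubly linked circular copy list; the rest re-initialize it as a singleton
   with no known constant value and an all-unknown (all-ones) bit mask. */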
/* Reset all temporaries, given that there are NB_TEMPS of them. */
static void reset_all_temps(int nb_temps)
{
    bitmap_zero(temps_used.l, nb_temps);
}
/* Initialize and activate a temporary. */
static void init_temp_info(TCGArg temp)
{
    if (!test_bit(temp, temps_used.l)) {
        temps[temp].next_copy = temp;
        temps[temp].prev_copy = temp;
        temps[temp].is_const = false;
        temps[temp].mask = -1;
        set_bit(temp, temps_used.l);
    }
}
static int op_bits(TCGOpcode op)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    return def->flags & TCG_OPF_64BIT ? 64 : 32;
}
static TCGOpcode op_to_mov(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
    case 64:
        return INDEX_op_mov_i64;
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGOpcode op_to_movi(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
    case 64:
        return INDEX_op_movi_i64;
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
{
    TCGArg i;

    /* If this is already a global, we can't do better. */
    if (temp < s->nb_globals) {
        return temp;
    }

    /* Search for a global first. */
    for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
        if (i < s->nb_globals) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!s->temps[temp].temp_local) {
        for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
            if (s->temps[i].temp_local) {
                return i;
            }
        }
    }

    /* Failure to find a better representation, return the same temp. */
    return temp;
}
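/* Canonicalization preference implemented above: a global temp in the copy
   list wins, then a temp local, and only then the original temp itself. */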
static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
{
    TCGArg i;

    if (arg1 == arg2) {
        return true;
    }

    if (!temp_is_copy(arg1) || !temp_is_copy(arg2)) {
        return false;
    }

    for (i = temps[arg1].next_copy ; i != arg1 ; i = temps[i].next_copy) {
        if (i == arg2) {
            return true;
        }
    }

    return false;
}
static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg *args,
                             TCGArg dst, TCGArg val)
{
    TCGOpcode new_op = op_to_movi(op->opc);
    tcg_target_ulong mask;

    op->opc = new_op;

    reset_temp(dst);
    temps[dst].is_const = true;
    temps[dst].val = val;
    mask = val;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
        /* High bits of the destination are now garbage. */
        mask |= ~0xffffffffull;
    }
    temps[dst].mask = mask;

    args[0] = dst;
    args[1] = val;
}
static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg *args,
                            TCGArg dst, TCGArg src)
{
    if (temps_are_copies(dst, src)) {
        tcg_op_remove(s, op);
        return;
    }

    TCGOpcode new_op = op_to_mov(op->opc);
    tcg_target_ulong mask;

    op->opc = new_op;

    reset_temp(dst);
    mask = temps[src].mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage. */
        mask |= ~0xffffffffull;
    }
    temps[dst].mask = mask;

    if (s->temps[src].type == s->temps[dst].type) {
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
        temps[dst].is_const = temps[src].is_const;
        temps[dst].val = temps[src].val;
    }

    args[0] = dst;
    args[1] = src;
}
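/* The destination is linked into the source's copy list, and inherits its
   constant status, only when both temps have the same TCG type; for a
   width-changing mov only the mask information computed above is kept. */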
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    uint64_t l64, h64;

    switch (op) {
    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
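/* Worked example: if both inputs of a shl_i32 are known constants 3 and 4,
   do_constant_folding_2(INDEX_op_shl_i32, 3, 4) evaluates
   (uint32_t)3 << (4 & 31) == 48, and the caller then rewrites the op as a
   movi of 48. */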
static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
    if (op_bits(op) == 32) {
        res = (int32_t)res;
    }
    return res;
}
static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_GTU:
    case TCG_COND_LT:
    case TCG_COND_LTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    if (temp_is_const(x) && temp_is_const(y)) {
        switch (op_bits(op)) {
        case 32:
            return do_constant_folding_cond_32(temps[x].val, temps[y].val, c);
        case 64:
            return do_constant_folding_cond_64(temps[x].val, temps[y].val, c);
        default:
            tcg_abort();
        }
    } else if (temps_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (temp_is_const(y) && temps[y].val == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    }
    return 2;
}
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (temp_is_const(bl) && temp_is_const(bh)) {
        uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val;

        if (temp_is_const(al) && temp_is_const(ah)) {
            uint64_t a;
            a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val;
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (temps_are_copies(al, bl) && temps_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return 2;
}
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += temp_is_const(a1);
    sum -= temp_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
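/* For instance, "add t0, $5, t1" is canonicalized to "add t0, t1, $5"
   (constant moved to the second slot), and "add t0, t1, t0" becomes
   "add t0, t0, t1" (destination aliases the first source). */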
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += temp_is_const(p1[0]);
    sum += temp_is_const(p1[1]);
    sum -= temp_is_const(p2[0]);
    sum -= temp_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int oi, oi_next, nb_temps, nb_globals;
    TCGArg *prev_mb_args = NULL;

    /* Array VALS has an element for each temp.
       If this temp holds a constant then its value is kept in VALS' element.
       If this temp is a copy of other ones then the other copies are
       available through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    reset_all_temps(nb_temps);

    for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
        tcg_target_ulong mask, partmask, affected;
        int nb_oargs, nb_iargs, i;
        TCGArg tmp;

        TCGOp * const op = &s->gen_op_buf[oi];
        TCGArg * const args = &s->gen_opparam_buf[op->args];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        oi_next = op->next;
        /* Count the arguments, and initialize the temps that are
           going to be used */
        if (opc == INDEX_op_call) {
            nb_oargs = op->callo;
            nb_iargs = op->calli;
            for (i = 0; i < nb_oargs + nb_iargs; i++) {
                tmp = args[i];
                if (tmp != TCG_CALL_DUMMY_ARG) {
                    init_temp_info(tmp);
                }
            }
        } else {
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            for (i = 0; i < nb_oargs + nb_iargs; i++) {
                init_temp_info(args[i]);
            }
        }

        /* Do copy propagation */
        for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
            if (temp_is_copy(args[i])) {
                args[i] = find_better_copy(s, args[i]);
            }
        }
        /* For commutative operations make constant second argument */
        switch (opc) {
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            swap_commutative(args[0], &args[1], &args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &args[0], &args[1])) {
                args[2] = tcg_swap_cond(args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(args[0], &args[1], &args[2])) {
                args[3] = tcg_swap_cond(args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &args[1], &args[2])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation.  */
            if (swap_commutative(args[0], &args[4], &args[3])) {
                args[5] = tcg_invert_cond(args[5]);
            }
            break;
        CASE_OP_32_64(add2):
            swap_commutative(args[0], &args[2], &args[4]);
            swap_commutative(args[1], &args[3], &args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(args[0], &args[2], &args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&args[0], &args[2])) {
                args[4] = tcg_swap_cond(args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&args[1], &args[3])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            break;
        default:
            break;
        }
        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and "sub r, 0, a => neg r, a" case.  */
        switch (opc) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (temp_is_const(args[1]) && temps[args[1]].val == 0) {
                tcg_opt_gen_movi(s, op, args, args[0], 0);
                continue;
            }
            break;
        CASE_OP_32_64(sub):
            {
                TCGOpcode neg_op;
                bool have_neg;

                if (temp_is_const(args[2])) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (opc == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                }
                if (!have_neg) {
                    break;
                }
                if (temp_is_const(args[1]) && temps[args[1]].val == 0) {
                    op->opc = neg_op;
                    reset_temp(args[0]);
                    temps[args[0]].mask = -1;
                    args[1] = args[2];
                    continue;
                }
            }
            break;
        CASE_OP_32_64(xor):
        CASE_OP_32_64(nand):
            if (!temp_is_const(args[1])
                && temp_is_const(args[2]) && temps[args[2]].val == -1) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(nor):
            if (!temp_is_const(args[1])
                && temp_is_const(args[2]) && temps[args[2]].val == 0) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(andc):
            if (!temp_is_const(args[2])
                && temp_is_const(args[1]) && temps[args[1]].val == -1) {
                i = 2;
                goto try_not;
            }
            break;
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
            if (!temp_is_const(args[2])
                && temp_is_const(args[1]) && temps[args[1]].val == 0) {
                i = 2;
                goto try_not;
            }
            break;
        try_not:
            {
                TCGOpcode not_op;
                bool have_not;

                if (def->flags & TCG_OPF_64BIT) {
                    not_op = INDEX_op_not_i64;
                    have_not = TCG_TARGET_HAS_not_i64;
                } else {
                    not_op = INDEX_op_not_i32;
                    have_not = TCG_TARGET_HAS_not_i32;
                }
                if (!have_not) {
                    break;
                }
                op->opc = not_op;
                reset_temp(args[0]);
                temps[args[0]].mask = -1;
                args[1] = args[i];
                continue;
            }
        default:
            break;
        }
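        /* Example of the rewrites above: with args[1] known to be zero and a
           host that provides neg, "sub_i32 t0, zero, t1" is turned in place
           into "neg_i32 t0, t1"; similarly an xor with an all-ones constant
           becomes a not when the host supports it. */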
        /* Simplify expression for "op r, a, const => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(andc):
            if (!temp_is_const(args[1])
                && temp_is_const(args[2]) && temps[args[2]].val == 0) {
                tcg_opt_gen_mov(s, op, args, args[0], args[1]);
                continue;
            }
            break;
        CASE_OP_32_64(and):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
            if (!temp_is_const(args[1])
                && temp_is_const(args[2]) && temps[args[2]].val == -1) {
                tcg_opt_gen_mov(s, op, args, args[0], args[1]);
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify using known-zero bits. Currently only ops with a single
           output argument are supported. */
        mask = -1;
        affected = -1;
        switch (opc) {
        CASE_OP_32_64(ext8s):
            if ((temps[args[1]].mask & 0x80) != 0) {
                break;
            }
        CASE_OP_32_64(ext8u):
            mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((temps[args[1]].mask & 0x8000) != 0) {
                break;
            }
        CASE_OP_32_64(ext16u):
            mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((temps[args[1]].mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_ext32u_i64:
            mask = 0xffffffffU;
            goto and_const;

        CASE_OP_32_64(and):
            mask = temps[args[2]].mask;
            if (temp_is_const(args[2])) {
        and_const:
                affected = temps[args[1]].mask & ~mask;
            }
            mask = temps[args[1]].mask & mask;
            break;

        case INDEX_op_ext_i32_i64:
            if ((temps[args[1]].mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_extu_i32_i64:
            /* We do not compute affected as it is a size changing op. */
            mask = (uint32_t)temps[args[1]].mask;
            break;

        CASE_OP_32_64(andc):
            /* Known-zeros does not imply known-ones.  Therefore unless
               args[2] is constant, we can't infer anything from it. */
            if (temp_is_const(args[2])) {
                mask = ~temps[args[2]].mask;
                goto and_const;
            }
            /* But we certainly know nothing outside args[1] may be set. */
            mask = temps[args[1]].mask;
            break;
        case INDEX_op_sar_i32:
            if (temp_is_const(args[2])) {
                tmp = temps[args[2]].val & 31;
                mask = (int32_t)temps[args[1]].mask >> tmp;
            }
            break;
        case INDEX_op_sar_i64:
            if (temp_is_const(args[2])) {
                tmp = temps[args[2]].val & 63;
                mask = (int64_t)temps[args[1]].mask >> tmp;
            }
            break;

        case INDEX_op_shr_i32:
            if (temp_is_const(args[2])) {
                tmp = temps[args[2]].val & 31;
                mask = (uint32_t)temps[args[1]].mask >> tmp;
            }
            break;
        case INDEX_op_shr_i64:
            if (temp_is_const(args[2])) {
                tmp = temps[args[2]].val & 63;
                mask = (uint64_t)temps[args[1]].mask >> tmp;
            }
            break;

        case INDEX_op_extrl_i64_i32:
            mask = (uint32_t)temps[args[1]].mask;
            break;
        case INDEX_op_extrh_i64_i32:
            mask = (uint64_t)temps[args[1]].mask >> 32;
            break;

        CASE_OP_32_64(shl):
            if (temp_is_const(args[2])) {
                tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
                mask = temps[args[1]].mask << tmp;
            }
            break;

        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost. */
            mask = -(temps[args[1]].mask & -temps[args[1]].mask);
            break;
        CASE_OP_32_64(deposit):
            mask = deposit64(temps[args[1]].mask, args[3], args[4],
                             temps[args[2]].mask);
            break;

        CASE_OP_32_64(extract):
            mask = extract64(temps[args[1]].mask, args[2], args[3]);
            if (args[2] == 0) {
                affected = temps[args[1]].mask & ~mask;
            }
            break;
        CASE_OP_32_64(sextract):
            mask = sextract64(temps[args[1]].mask, args[2], args[3]);
            if (args[2] == 0 && (tcg_target_long)mask >= 0) {
                affected = temps[args[1]].mask & ~mask;
            }
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            mask = temps[args[1]].mask | temps[args[2]].mask;
            break;

        case INDEX_op_clz_i32:
        case INDEX_op_ctz_i32:
            mask = temps[args[2]].mask | 31;
            break;

        case INDEX_op_clz_i64:
        case INDEX_op_ctz_i64:
            mask = temps[args[2]].mask | 63;
            break;

        case INDEX_op_ctpop_i32:
            mask = 32 | 31;
            break;
        case INDEX_op_ctpop_i64:
            mask = 64 | 63;
            break;

        CASE_OP_32_64(setcond):
        case INDEX_op_setcond2_i32:
            mask = 1;
            break;

        CASE_OP_32_64(movcond):
            mask = temps[args[3]].mask | temps[args[4]].mask;
            break;

        CASE_OP_32_64(ld16u):
            mask = 0xffff;
            break;
        case INDEX_op_ld32u_i64:
            mask = 0xffffffffu;
            break;

        CASE_OP_32_64(qemu_ld):
            {
                TCGMemOpIdx oi = args[nb_oargs + nb_iargs];
                TCGMemOp mop = get_memop(oi);
                if (!(mop & MO_SIGN)) {
                    mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
                }
            }
            break;

        default:
            break;
        }
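        /* Example of the mask tracking above: for "and t0, t1, $0xff" the
           result mask is at most 0xff, and "affected" collects the bits of t1
           that the op could actually clear; if t1 was already known to fit in
           8 bits, affected is 0 and the op is turned into a plain mov below. */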
        /* 32-bit ops generate 32-bit results.  For the "result is zero" test
           below we can ignore the high bits, but for further optimizations we
           need to record that the high bits contain garbage.  */
        partmask = mask;
        if (!(def->flags & TCG_OPF_64BIT)) {
            mask |= ~(tcg_target_ulong)0xffffffffu;
            partmask &= 0xffffffffu;
            affected &= 0xffffffffu;
        }

        if (partmask == 0) {
            tcg_debug_assert(nb_oargs == 1);
            tcg_opt_gen_movi(s, op, args, args[0], 0);
            continue;
        }
        if (affected == 0) {
            tcg_debug_assert(nb_oargs == 1);
            tcg_opt_gen_mov(s, op, args, args[0], args[1]);
            continue;
        }
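        /* partmask == 0 means every bit of the result is known to be zero, so
           the op collapses into "movi r, 0"; affected == 0 means the op cannot
           change any bit of its first input, so it collapses into "mov r, a". */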
        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (opc) {
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            if ((temp_is_const(args[2]) && temps[args[2]].val == 0)) {
                tcg_opt_gen_movi(s, op, args, args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            if (temps_are_copies(args[1], args[2])) {
                tcg_opt_gen_mov(s, op, args, args[0], args[1]);
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify expression for "op r, a, a => movi r, 0" cases */
        switch (opc) {
        CASE_OP_32_64(andc):
            if (temps_are_copies(args[1], args[2])) {
                tcg_opt_gen_movi(s, op, args, args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }
        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted to arguments by register
           allocator where needed and possible.  Also detect copies. */
        switch (opc) {
        CASE_OP_32_64(mov):
            tcg_opt_gen_mov(s, op, args, args[0], args[1]);
            break;
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(s, op, args, args[0], args[1]);
            break;

        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        CASE_OP_32_64(ctpop):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
        case INDEX_op_ext_i32_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            if (temp_is_const(args[1])) {
                tmp = do_constant_folding(opc, temps[args[1]].val, 0);
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
                break;
            }
            goto do_default;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(divu):
        CASE_OP_32_64(remu):
            if (temp_is_const(args[1]) && temp_is_const(args[2])) {
                tmp = do_constant_folding(opc, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            if (temp_is_const(args[1])) {
                TCGArg v = temps[args[1]].val;
                if (v != 0) {
                    tmp = do_constant_folding(opc, v, 0);
                    tcg_opt_gen_movi(s, op, args, args[0], tmp);
                } else {
                    tcg_opt_gen_mov(s, op, args, args[0], args[2]);
                }
                break;
            }
            goto do_default;
        CASE_OP_32_64(deposit):
            if (temp_is_const(args[1]) && temp_is_const(args[2])) {
                tmp = deposit64(temps[args[1]].val, args[3], args[4],
                                temps[args[2]].val);
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(extract):
            if (temp_is_const(args[1])) {
                tmp = extract64(temps[args[1]].val, args[2], args[3]);
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(sextract):
            if (temp_is_const(args[1])) {
                tmp = sextract64(temps[args[1]].val, args[2], args[3]);
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
                break;
            }
            goto do_default;
        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(opc, args[1], args[2], args[3]);
            if (tmp != 2) {
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
                break;
            }
            goto do_default;

        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(opc, args[0], args[1], args[2]);
            if (tmp != 2) {
                if (tmp) {
                    reset_all_temps(nb_temps);
                    op->opc = INDEX_op_br;
                    args[0] = args[3];
                } else {
                    tcg_op_remove(s, op);
                }
                break;
            }
            goto do_default;

        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(opc, args[1], args[2], args[5]);
            if (tmp != 2) {
                tcg_opt_gen_mov(s, op, args, args[0], args[4-tmp]);
                break;
            }
            if (temp_is_const(args[3]) && temp_is_const(args[4])) {
                tcg_target_ulong tv = temps[args[3]].val;
                tcg_target_ulong fv = temps[args[4]].val;
                TCGCond cond = args[5];
                if (fv == 1 && tv == 0) {
                    cond = tcg_invert_cond(cond);
                } else if (!(tv == 1 && fv == 0)) {
                    goto do_default;
                }
                args[3] = cond;
                op->opc = opc = (opc == INDEX_op_movcond_i32
                                 ? INDEX_op_setcond_i32
                                 : INDEX_op_setcond_i64);
                nb_iargs = 2;
                break;
            }
            goto do_default;
        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            if (temp_is_const(args[2]) && temp_is_const(args[3])
                && temp_is_const(args[4]) && temp_is_const(args[5])) {
                uint32_t al = temps[args[2]].val;
                uint32_t ah = temps[args[3]].val;
                uint32_t bl = temps[args[4]].val;
                uint32_t bh = temps[args[5]].val;
                uint64_t a = ((uint64_t)ah << 32) | al;
                uint64_t b = ((uint64_t)bh << 32) | bl;
                TCGArg rl, rh;
                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32, 2);
                TCGArg *args2 = &s->gen_opparam_buf[op2->args];

                if (opc == INDEX_op_add2_i32) {
                    a += b;
                } else {
                    a -= b;
                }

                rl = args[0];
                rh = args[1];
                tcg_opt_gen_movi(s, op, args, rl, (int32_t)a);
                tcg_opt_gen_movi(s, op2, args2, rh, (int32_t)(a >> 32));

                /* We've done all we need to do with the movi.  Skip it. */
                oi_next = op2->next;
                break;
            }
            goto do_default;
        case INDEX_op_mulu2_i32:
            if (temp_is_const(args[2]) && temp_is_const(args[3])) {
                uint32_t a = temps[args[2]].val;
                uint32_t b = temps[args[3]].val;
                uint64_t r = (uint64_t)a * b;
                TCGArg rl, rh;
                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32, 2);
                TCGArg *args2 = &s->gen_opparam_buf[op2->args];

                rl = args[0];
                rh = args[1];
                tcg_opt_gen_movi(s, op, args, rl, (int32_t)r);
                tcg_opt_gen_movi(s, op2, args2, rh, (int32_t)(r >> 32));

                /* We've done all we need to do with the movi.  Skip it. */
                oi_next = op2->next;
                break;
            }
            goto do_default;
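        /* The folded add2/sub2/mulu2 cases above need two destinations, so
           the original op becomes the movi of the low half while a freshly
           inserted op2 carries the high half; oi_next is advanced past op2
           because that movi has already been fully handled here. */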
        case INDEX_op_brcond2_i32:
            tmp = do_constant_folding_cond2(&args[0], &args[2], args[4]);
            if (tmp != 2) {
                if (tmp) {
            do_brcond_true:
                    reset_all_temps(nb_temps);
                    op->opc = INDEX_op_br;
                    args[0] = args[5];
                } else {
            do_brcond_false:
                    tcg_op_remove(s, op);
                }
            } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE)
                       && temp_is_const(args[2]) && temps[args[2]].val == 0
                       && temp_is_const(args[3]) && temps[args[3]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input. */
            do_brcond_high:
                reset_all_temps(nb_temps);
                op->opc = INDEX_op_brcond_i32;
                args[0] = args[1];
                args[1] = args[3];
                args[2] = args[4];
                args[3] = args[5];
            } else if (args[4] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[0], args[2], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp == 1) {
                    goto do_brcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[1], args[3], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_brcond_low:
                reset_all_temps(nb_temps);
                op->opc = INDEX_op_brcond_i32;
                args[1] = args[2];
                args[2] = args[4];
                args[3] = args[5];
            } else if (args[4] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[0], args[2], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_high;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[1], args[3], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_low;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            break;
        case INDEX_op_setcond2_i32:
            tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]);
            if (tmp != 2) {
            do_setcond_const:
                tcg_opt_gen_movi(s, op, args, args[0], tmp);
            } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE)
                       && temp_is_const(args[3]) && temps[args[3]].val == 0
                       && temp_is_const(args[4]) && temps[args[4]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input. */
            do_setcond_high:
                reset_temp(args[0]);
                temps[args[0]].mask = 1;
                op->opc = INDEX_op_setcond_i32;
                args[1] = args[2];
                args[2] = args[4];
                args[3] = args[5];
            } else if (args[5] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[1], args[3], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_const;
                } else if (tmp == 1) {
                    goto do_setcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[2], args[4], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_setcond_low:
                reset_temp(args[0]);
                temps[args[0]].mask = 1;
                op->opc = INDEX_op_setcond_i32;
                args[2] = args[3];
                args[3] = args[5];
            } else if (args[5] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[1], args[3], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[2], args[4], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_low;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            break;
        case INDEX_op_call:
            if (!(args[nb_oargs + nb_iargs + 1]
                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
                for (i = 0; i < nb_globals; i++) {
                    if (test_bit(i, temps_used.l)) {
                        reset_temp(i);
                    }
                }
            }
            goto do_reset_output;

        default:
        do_default:
            /* Default case: we know nothing about operation (or were unable
               to compute the operation result) so no propagation is done.
               We trash everything if the operation is the end of a basic
               block, otherwise we only trash the output args.  "mask" is
               the non-zero bits mask for the first output arg. */
            if (def->flags & TCG_OPF_BB_END) {
                reset_all_temps(nb_temps);
            } else {
        do_reset_output:
                for (i = 0; i < nb_oargs; i++) {
                    reset_temp(args[i]);
                    /* Save the corresponding known-zero bits mask for the
                       first output argument (only one supported so far). */
                    if (i == 0) {
                        temps[args[i]].mask = mask;
                    }
                }
            }
            break;
        }
        /* Eliminate duplicate and redundant fence instructions. */
        if (prev_mb_args) {
            switch (opc) {
            case INDEX_op_mb:
                /* Merge two barriers of the same type into one,
                 * or a weaker barrier into a stronger one,
                 * or two weaker barriers into a stronger one.
                 *   mb X; mb Y => mb X|Y
                 *   mb; strl => mb; st
                 *   ldaq; mb => ld; mb
                 *   ldaq; strl => ld; mb; st
                 * Other combinations are also merged into a strong
                 * barrier.  This is stricter than specified but for
                 * the purposes of TCG is better than not optimizing.
                 */
                prev_mb_args[0] |= args[0];
                tcg_op_remove(s, op);
                break;

            default:
                /* Opcodes that end the block stop the optimization. */
                if ((def->flags & TCG_OPF_BB_END) == 0) {
                    break;
                }
                /* fallthru */
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st_i64:
            case INDEX_op_call:
                /* Opcodes that touch guest memory stop the optimization. */
                prev_mb_args = NULL;
                break;
            }
        } else if (opc == INDEX_op_mb) {
            prev_mb_args = args;
        }
    }
}