/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "tcg-op.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

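/* For example, CASE_OP_32_64(add) expands to
   "case INDEX_op_add_i32: case INDEX_op_add_i64", so a single case
   body covers both widths of the same operation. */
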
typedef enum {
    TCG_TEMP_UNDEF = 0,
    TCG_TEMP_CONST,
    TCG_TEMP_COPY,
} tcg_temp_state;

struct tcg_temp_info {
    tcg_temp_state state;
    uint16_t prev_copy;
    uint16_t next_copy;
    tcg_target_ulong val;
    tcg_target_ulong mask;
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];

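/* A temp is either undefined, a known constant (val holds the value), or a
   member of a class of copies.  All copies of the same value are linked into
   a doubly linked circular list through next_copy/prev_copy.  mask records
   which bits may be nonzero: a set bit means "unknown", so -1 means nothing
   is known about the temp's value. */
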
/* Reset TEMP's state to TCG_TEMP_UNDEF.  If TEMP had only one other copy,
   remove the copy flag from that remaining temp as well, since a temp
   cannot form a copy list by itself. */
static void reset_temp(TCGArg temp)
{
    if (temps[temp].state == TCG_TEMP_COPY) {
        if (temps[temp].prev_copy == temps[temp].next_copy) {
            /* Only one other copy left; it is no longer a copy either. */
            temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF;
        } else {
            /* Unlink TEMP from the circular copy list. */
            temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
            temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
        }
    }
    temps[temp].state = TCG_TEMP_UNDEF;
    temps[temp].mask = -1;
}

/* Reset all temporaries, given that there are NB_TEMPS of them. */
static void reset_all_temps(int nb_temps)
{
    int i;

    for (i = 0; i < nb_temps; i++) {
        temps[i].state = TCG_TEMP_UNDEF;
        temps[i].mask = -1;
    }
}

static int op_bits(TCGOpcode op)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    return def->flags & TCG_OPF_64BIT ? 64 : 32;
}

static TCGOpcode op_to_mov(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
    case 64:
        return INDEX_op_mov_i64;
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}

static TCGOpcode op_to_movi(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
    case 64:
        return INDEX_op_movi_i64;
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}

static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
{
    TCGArg i;

    /* If this is already a global, we can't do better. */
    if (temp < s->nb_globals) {
        return temp;
    }

    /* Search for a global first. */
    for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
        if (i < s->nb_globals) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!s->temps[temp].temp_local) {
        for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
            if (s->temps[i].temp_local) {
                return i;
            }
        }
    }

    /* Failure to find a better representation, return the same temp. */
    return temp;
}

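/* The preference order implemented above is: globals first, then temp
   locals, then the temp itself, so every member of a copy class
   canonicalizes to the same representative. */
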
static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
{
    TCGArg i;

    if (arg1 == arg2) {
        return true;
    }

    if (temps[arg1].state != TCG_TEMP_COPY
        || temps[arg2].state != TCG_TEMP_COPY) {
        return false;
    }

    for (i = temps[arg1].next_copy; i != arg1; i = temps[i].next_copy) {
        if (i == arg2) {
            return true;
        }
    }

    return false;
}

static void tcg_opt_gen_mov(TCGContext *s, int op_index, TCGArg *gen_args,
                            TCGOpcode old_op, TCGArg dst, TCGArg src)
{
    TCGOpcode new_op = op_to_mov(old_op);
    tcg_target_ulong mask;

    s->gen_opc_buf[op_index] = new_op;

    reset_temp(dst);
    mask = temps[src].mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage. */
        mask |= ~0xffffffffull;
    }
    temps[dst].mask = mask;

    assert(temps[src].state != TCG_TEMP_CONST);

    if (s->temps[src].type == s->temps[dst].type) {
        if (temps[src].state != TCG_TEMP_COPY) {
            temps[src].state = TCG_TEMP_COPY;
            temps[src].next_copy = src;
            temps[src].prev_copy = src;
        }
        temps[dst].state = TCG_TEMP_COPY;
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
    }

    gen_args[0] = dst;
    gen_args[1] = src;
}

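/* The four assignments above splice DST into the circular copy list right
   after SRC: a list src -> a -> b -> src becomes
   src -> dst -> a -> b -> src. */
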
static void tcg_opt_gen_movi(TCGContext *s, int op_index, TCGArg *gen_args,
                             TCGOpcode old_op, TCGArg dst, TCGArg val)
{
    TCGOpcode new_op = op_to_movi(old_op);
    tcg_target_ulong mask;

    s->gen_opc_buf[op_index] = new_op;

    reset_temp(dst);
    temps[dst].state = TCG_TEMP_CONST;
    temps[dst].val = val;
    mask = val;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
        /* High bits of the destination are now garbage. */
        mask |= ~0xffffffffull;
    }
    temps[dst].mask = mask;

    gen_args[0] = dst;
    gen_args[1] = val;
}

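/* For a constant, the known-nonzero mask is just the value itself; only
   the high bits of a 32-bit movi on a 64-bit host remain unknown. */
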
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_trunc_shr_i32:
    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}

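/* Note that shift and rotate counts are masked (y & 31 or y & 63) before
   the host-side C shift, so the folded computation stays well defined
   even when the guest supplied an out-of-range count. */
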
static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
    if (op_bits(op) == 32) {
        res &= 0xffffffff;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

/* Evaluate condition C assuming both operands are equal. */
static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}

/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can. */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) {
        switch (op_bits(op)) {
        case 32:
            return do_constant_folding_cond_32(temps[x].val, temps[y].val, c);
        case 64:
            return do_constant_folding_cond_64(temps[x].val, temps[y].val, c);
        default:
            tcg_abort();
        }
    } else if (temps_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) {
        /* Unsigned comparisons against zero fold even when the other
           operand is unknown: x < 0 is never true, x >= 0 always is. */
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    } else {
        return 2;
    }
}

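/* Note that the copies test lets a comparison of a temp with itself fold
   even when its value is unknown; e.g. "setcond t0, t1, t1, EQ" yields 1. */
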
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can. */
static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (temps[bl].state == TCG_TEMP_CONST
        && temps[bh].state == TCG_TEMP_CONST) {
        uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val;

        if (temps[al].state == TCG_TEMP_CONST
            && temps[ah].state == TCG_TEMP_CONST) {
            uint64_t a;
            a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val;
            return do_constant_folding_cond_64(a, b, c);
        }
    }
    if (temps_are_copies(al, bl) && temps_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return 2;
}

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += temps[a1].state == TCG_TEMP_CONST;
    sum -= temps[a2].state == TCG_TEMP_CONST;

    /* Prefer the constant in the second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += temps[p1[0]].state == TCG_TEMP_CONST;
    sum += temps[p1[1]].state == TCG_TEMP_CONST;
    sum -= temps[p2[0]].state == TCG_TEMP_CONST;
    sum -= temps[p2[1]].state == TCG_TEMP_CONST;
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/* Propagate constants and copies, fold constant expressions. */
static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                    TCGArg *args, TCGOpDef *tcg_op_defs)
{
    int nb_ops, op_index, nb_temps, nb_globals;
    TCGArg *gen_args;

    /* Array TEMPS has an element for each temp.
       If this temp holds a constant then its value is kept in the
       element's val field.  If this temp is a copy of other ones then
       the other copies are available through a doubly linked
       circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    reset_all_temps(nb_temps);

    nb_ops = tcg_opc_ptr - s->gen_opc_buf;
    gen_args = args;

    for (op_index = 0; op_index < nb_ops; op_index++) {
        TCGOpcode op = s->gen_opc_buf[op_index];
        const TCGOpDef *def = &tcg_op_defs[op];
        tcg_target_ulong mask, partmask, affected;
        int nb_oargs, nb_iargs, nb_args, i;
        TCGArg tmp;

        if (op == INDEX_op_call) {
            *gen_args++ = tmp = *args++;
            nb_oargs = tmp >> 16;
            nb_iargs = tmp & 0xffff;
            nb_args = nb_oargs + nb_iargs + def->nb_cargs;
        } else {
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_args = def->nb_args;
        }

        /* Do copy propagation. */
        for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
            if (temps[args[i]].state == TCG_TEMP_COPY) {
                args[i] = find_better_copy(s, args[i]);
            }
        }

        /* For commutative operations make constant second argument. */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(and):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            swap_commutative(args[0], &args[1], &args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &args[0], &args[1])) {
                args[2] = tcg_swap_cond(args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(args[0], &args[1], &args[2])) {
                args[3] = tcg_swap_cond(args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &args[1], &args[2])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation. */
            if (swap_commutative(args[0], &args[4], &args[3])) {
                args[5] = tcg_invert_cond(args[5]);
            }
            break;
        CASE_OP_32_64(add2):
            swap_commutative(args[0], &args[2], &args[4]);
            swap_commutative(args[1], &args[3], &args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(args[0], &args[2], &args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&args[0], &args[2])) {
                args[4] = tcg_swap_cond(args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&args[1], &args[3])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            break;
        default:
            break;
        }

        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and the "sub r, 0, a => neg r, a" case. */
        switch (op) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        CASE_OP_32_64(sub):
            {
                TCGOpcode neg_op;
                bool have_neg;

                if (temps[args[2]].state == TCG_TEMP_CONST) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (op == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                }
                if (!have_neg) {
                    break;
                }
                if (temps[args[1]].state == TCG_TEMP_CONST
                    && temps[args[1]].val == 0) {
                    s->gen_opc_buf[op_index] = neg_op;
                    reset_temp(args[0]);
                    gen_args[0] = args[0];
                    gen_args[1] = args[2];
                    args += 3;
                    gen_args += 2;
                    continue;
                }
            }
            break;
        CASE_OP_32_64(xor):
        CASE_OP_32_64(nand):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == -1) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(nor):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(andc):
            if (temps[args[2]].state != TCG_TEMP_CONST
                && temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == -1) {
                i = 2;
                goto try_not;
            }
            break;
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
            if (temps[args[2]].state != TCG_TEMP_CONST
                && temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                i = 2;
                goto try_not;
            }
            break;
        try_not:
            {
                TCGOpcode not_op;
                bool have_not;

                if (def->flags & TCG_OPF_64BIT) {
                    not_op = INDEX_op_not_i64;
                    have_not = TCG_TARGET_HAS_not_i64;
                } else {
                    not_op = INDEX_op_not_i32;
                    have_not = TCG_TARGET_HAS_not_i32;
                }
                if (!have_not) {
                    break;
                }
                s->gen_opc_buf[op_index] = not_op;
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[i];
                args += 3;
                gen_args += 2;
                continue;
            }
        default:
            break;
        }

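        /* Example: "xor_i32 r, a, $-1" becomes "not_i32 r, a" here,
           provided the backend implements a not op. */
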
        /* Simplify expression for "op r, a, const => mov r, a" cases. */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(andc):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                goto do_mov3;
            }
            break;
        CASE_OP_32_64(and):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
            if (temps[args[1]].state != TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == -1) {
                goto do_mov3;
            }
            break;
        do_mov3:
            if (temps_are_copies(args[0], args[1])) {
                s->gen_opc_buf[op_index] = INDEX_op_nop;
            } else {
                tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
                gen_args += 2;
            }
            args += 3;
            continue;
        default:
            break;
        }

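        /* The two groups above differ only in the identity element:
           0 for add, sub, shifts, rotates, or, xor and andc;
           -1 for and, orc and eqv. */
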
        /* Simplify using known-zero bits.  Currently only ops with a
           single output argument are supported. */
        mask = -1;
        affected = -1;
        switch (op) {
        CASE_OP_32_64(ext8s):
            if ((temps[args[1]].mask & 0x80) != 0) {
                break;
            }
        CASE_OP_32_64(ext8u):
            mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((temps[args[1]].mask & 0x8000) != 0) {
                break;
            }
        CASE_OP_32_64(ext16u):
            mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((temps[args[1]].mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_ext32u_i64:
            mask = 0xffffffffU;
            goto and_const;

        CASE_OP_32_64(and):
            mask = temps[args[2]].mask;
            if (temps[args[2]].state == TCG_TEMP_CONST) {
        and_const:
                affected = temps[args[1]].mask & ~mask;
            }
            mask = temps[args[1]].mask & mask;
            break;

        CASE_OP_32_64(andc):
            /* Known-zeros does not imply known-ones.  Therefore unless
               args[2] is constant, we can't infer anything from it. */
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                mask = ~temps[args[2]].mask;
                goto and_const;
            }
            /* But we certainly know nothing outside args[1] may be set. */
            mask = temps[args[1]].mask;
            break;

        case INDEX_op_sar_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 31;
                mask = (int32_t)temps[args[1]].mask >> tmp;
            }
            break;
        case INDEX_op_sar_i64:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 63;
                mask = (int64_t)temps[args[1]].mask >> tmp;
            }
            break;

        case INDEX_op_shr_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 31;
                mask = (uint32_t)temps[args[1]].mask >> tmp;
            }
            break;
        case INDEX_op_shr_i64:
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & 63;
                mask = (uint64_t)temps[args[1]].mask >> tmp;
            }
            break;

        case INDEX_op_trunc_shr_i32:
            mask = (uint64_t)temps[args[1]].mask >> args[2];
            break;

        CASE_OP_32_64(shl):
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
                mask = temps[args[1]].mask << tmp;
            }
            break;

        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost
               possibly-set bit. */
            mask = -(temps[args[1]].mask & -temps[args[1]].mask);
            break;

        CASE_OP_32_64(deposit):
            mask = deposit64(temps[args[1]].mask, args[3], args[4],
                             temps[args[2]].mask);
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            mask = temps[args[1]].mask | temps[args[2]].mask;
            break;

        CASE_OP_32_64(setcond):
        case INDEX_op_setcond2_i32:
            mask = 1;
            break;

        CASE_OP_32_64(movcond):
            mask = temps[args[3]].mask | temps[args[4]].mask;
            break;

        CASE_OP_32_64(ld8u):
        case INDEX_op_qemu_ld8u:
            mask = 0xff;
            break;
        CASE_OP_32_64(ld16u):
        case INDEX_op_qemu_ld16u:
            mask = 0xffff;
            break;
        case INDEX_op_ld32u_i64:
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_qemu_ld32u:
#endif
            mask = 0xffffffffu;
            break;

        CASE_OP_32_64(qemu_ld):
            {
                TCGMemOp mop = args[nb_oargs + nb_iargs];
                if (!(mop & MO_SIGN)) {
                    mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
                }
            }
            break;

        default:
            break;
        }

        /* 32-bit ops (non 64-bit ops and non load/store ops) generate
           32-bit results.  For the result-is-zero test below we can
           ignore the high bits, but for further optimizations we need
           to record that the high bits contain garbage. */
        partmask = mask;
        if (!(def->flags & (TCG_OPF_CALL_CLOBBER | TCG_OPF_64BIT))) {
            mask |= ~(tcg_target_ulong)0xffffffffu;
            partmask &= 0xffffffffu;
            affected &= 0xffffffffu;
        }

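        /* Example: after an add_i32 on a 64-bit host the low 32 bits are
           exact while bits 32..63 are unspecified, so those bits are
           forced to "unknown" in mask yet ignored by the partmask and
           affected tests below. */
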
        if (partmask == 0) {
            assert(nb_oargs == 1);
            tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
            args += nb_args;
            gen_args += 2;
            continue;
        }
        if (affected == 0) {
            assert(nb_oargs == 1);
            if (temps_are_copies(args[0], args[1])) {
                s->gen_opc_buf[op_index] = INDEX_op_nop;
            } else if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
                gen_args += 2;
            } else {
                tcg_opt_gen_movi(s, op_index, gen_args, op,
                                 args[0], temps[args[1]].val);
                gen_args += 2;
            }
            args += nb_args;
            continue;
        }

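        /* Example of an "affected == 0" hit: if t1 is known to fit in
           8 bits, "and_i32 t0, t1, $0xff" cannot clear anything, so it
           degenerates to "mov_i32 t0, t1". */
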
        /* Simplify expression for "op r, a, 0 => movi r, 0" cases. */
        switch (op) {
        CASE_OP_32_64(and):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => mov r, a" cases. */
        switch (op) {
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            if (temps_are_copies(args[1], args[2])) {
                if (temps_are_copies(args[0], args[1])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    tcg_opt_gen_mov(s, op_index, gen_args, op,
                                    args[0], args[1]);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => movi r, 0" cases. */
        switch (op) {
        CASE_OP_32_64(andc):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(xor):
            if (temps_are_copies(args[1], args[2])) {
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0);
                gen_args += 2;
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }

        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted into arguments by the
           register allocator where needed and possible.  Also detect
           copies. */
        switch (op) {
        CASE_OP_32_64(mov):
            if (temps_are_copies(args[0], args[1])) {
                args += 2;
                s->gen_opc_buf[op_index] = INDEX_op_nop;
                break;
            }
            if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]);
                gen_args += 2;
                args += 2;
                break;
            }
            /* Source argument is constant.  Rewrite the operation and
               let the movi case handle it. */
            args[1] = temps[args[1]].val;
            /* fallthrough */
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], args[1]);
            gen_args += 2;
            args += 2;
            break;

        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                tmp = do_constant_folding(op, temps[args[1]].val, 0);
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
                args += 2;
                break;
            }
            goto do_default;

        case INDEX_op_trunc_shr_i32:
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                tmp = do_constant_folding(op, temps[args[1]].val, args[2]);
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
                args += 3;
                break;
            }
            goto do_default;

        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = do_constant_folding(op, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
                args += 3;
                break;
            }
            goto do_default;

        CASE_OP_32_64(deposit):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = deposit64(temps[args[1]].val, args[3], args[4],
                                temps[args[2]].val);
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
                args += 5;
                break;
            }
            goto do_default;

        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[3]);
            if (tmp != 2) {
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
                args += 4;
                break;
            }
            goto do_default;

        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(op, args[0], args[1], args[2]);
            if (tmp != 2) {
                if (tmp) {
                    reset_all_temps(nb_temps);
                    s->gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[3];
                    gen_args += 1;
                } else {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                }
                args += 4;
                break;
            }
            goto do_default;

        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[5]);
            if (tmp != 2) {
                /* args[4 - tmp] picks the selected value: args[3] (v1)
                   if the condition is known true, args[4] (v2) if it is
                   known false. */
                if (temps_are_copies(args[0], args[4-tmp])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) {
                    tcg_opt_gen_movi(s, op_index, gen_args, op,
                                     args[0], temps[args[4-tmp]].val);
                    gen_args += 2;
                } else {
                    tcg_opt_gen_mov(s, op_index, gen_args, op,
                                    args[0], args[4-tmp]);
                    gen_args += 2;
                }
                args += 6;
                break;
            }
            goto do_default;

        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[3]].state == TCG_TEMP_CONST
                && temps[args[4]].state == TCG_TEMP_CONST
                && temps[args[5]].state == TCG_TEMP_CONST) {
                uint32_t al = temps[args[2]].val;
                uint32_t ah = temps[args[3]].val;
                uint32_t bl = temps[args[4]].val;
                uint32_t bh = temps[args[5]].val;
                uint64_t a = ((uint64_t)ah << 32) | al;
                uint64_t b = ((uint64_t)bh << 32) | bl;
                TCGArg rl, rh;

                if (op == INDEX_op_add2_i32) {
                    a += b;
                } else {
                    a -= b;
                }

                /* We emit the extra nop when we emit the add2/sub2. */
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);

                rl = args[0];
                rh = args[1];
                tcg_opt_gen_movi(s, op_index, &gen_args[0],
                                 op, rl, (uint32_t)a);
                tcg_opt_gen_movi(s, ++op_index, &gen_args[2],
                                 op, rh, (uint32_t)(a >> 32));
                gen_args += 4;
                args += 6;
                break;
            }
            goto do_default;

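        /* A fully-constant add2/sub2 collapses into two movi ops; the
           second one reuses the nop slot that is always emitted right
           after an add2/sub2. */
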
        case INDEX_op_mulu2_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[3]].state == TCG_TEMP_CONST) {
                uint32_t a = temps[args[2]].val;
                uint32_t b = temps[args[3]].val;
                uint64_t r = (uint64_t)a * b;
                TCGArg rl, rh;

                /* We emit the extra nop when we emit the mulu2. */
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);

                rl = args[0];
                rh = args[1];
                tcg_opt_gen_movi(s, op_index, &gen_args[0],
                                 op, rl, (uint32_t)r);
                tcg_opt_gen_movi(s, ++op_index, &gen_args[2],
                                 op, rh, (uint32_t)(r >> 32));
                gen_args += 4;
                args += 4;
                break;
            }
            goto do_default;

        case INDEX_op_brcond2_i32:
            tmp = do_constant_folding_cond2(&args[0], &args[2], args[4]);
            if (tmp != 2) {
                if (tmp) {
            do_brcond_true:
                    reset_all_temps(nb_temps);
                    s->gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[5];
                    gen_args += 1;
                } else {
            do_brcond_false:
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                }
            } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE)
                       && temps[args[2]].state == TCG_TEMP_CONST
                       && temps[args[3]].state == TCG_TEMP_CONST
                       && temps[args[2]].val == 0
                       && temps[args[3]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input. */
            do_brcond_high:
                reset_all_temps(nb_temps);
                s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
                gen_args[0] = args[1];
                gen_args[1] = args[3];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
                gen_args += 4;
            } else if (args[4] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[0], args[2], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp == 1) {
                    goto do_brcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[1], args[3], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_brcond_false;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_brcond_low:
                reset_all_temps(nb_temps);
                s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
                gen_args[0] = args[0];
                gen_args[1] = args[2];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
                gen_args += 4;
            } else if (args[4] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[0], args[2], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_high;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
                                               args[1], args[3], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_brcond_low;
                } else if (tmp == 1) {
                    goto do_brcond_true;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            args += 6;
            break;

        case INDEX_op_setcond2_i32:
            tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]);
            if (tmp != 2) {
            do_setcond_const:
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
            } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE)
                       && temps[args[3]].state == TCG_TEMP_CONST
                       && temps[args[4]].state == TCG_TEMP_CONST
                       && temps[args[3]].val == 0
                       && temps[args[4]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input. */
            do_setcond_high:
                s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
                reset_temp(args[0]);
                temps[args[0]].mask = 1;
                gen_args[0] = args[0];
                gen_args[1] = args[2];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
                gen_args += 4;
            } else if (args[5] == TCG_COND_EQ) {
                /* Simplify EQ comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[1], args[3], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_const;
                } else if (tmp == 1) {
                    goto do_setcond_high;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[2], args[4], TCG_COND_EQ);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp != 1) {
                    goto do_default;
                }
            do_setcond_low:
                reset_temp(args[0]);
                temps[args[0]].mask = 1;
                s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[3];
                gen_args[3] = args[5];
                gen_args += 4;
            } else if (args[5] == TCG_COND_NE) {
                /* Simplify NE comparisons where one of the pairs
                   can be simplified. */
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[1], args[3], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_high;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
                                               args[2], args[4], TCG_COND_NE);
                if (tmp == 0) {
                    goto do_setcond_low;
                } else if (tmp == 1) {
                    goto do_setcond_const;
                }
                goto do_default;
            } else {
                goto do_default;
            }
            args += 6;
            break;

        case INDEX_op_call:
            if (!(args[nb_oargs + nb_iargs + 1]
                  & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
                for (i = 0; i < nb_globals; i++) {
                    reset_temp(i);
                }
            }
            goto do_reset_output;

        default:
        do_default:
            /* Default case: we know nothing about the operation (or were
               unable to compute its result), so no propagation is done.
               We trash everything if the operation is the end of a basic
               block, otherwise we only trash the output args.  "mask" is
               the non-zero bits mask for the first output arg. */
            if (def->flags & TCG_OPF_BB_END) {
                reset_all_temps(nb_temps);
            } else {
        do_reset_output:
                for (i = 0; i < nb_oargs; i++) {
                    reset_temp(args[i]);
                    /* Save the corresponding known-zero bits mask for the
                       first output argument (only one supported so far). */
                    if (i == 0) {
                        temps[args[i]].mask = mask;
                    }
                }
            }
            for (i = 0; i < nb_args; i++) {
                gen_args[i] = args[i];
            }
            args += nb_args;
            gen_args += nb_args;
            break;
        }
    }

    return gen_args;
}

TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
                     TCGArg *args, TCGOpDef *tcg_op_defs)
{
    TCGArg *res;
    res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
    return res;
}