/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config.h"

#include <stdlib.h>
#include <stdio.h>

#include "qemu-common.h"
#include "tcg-op.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
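
/* CASE_OP_32_64(op) expands to the pair of case labels for the _i32 and
   _i64 variants of an opcode, so e.g. CASE_OP_32_64(add) covers both
   INDEX_op_add_i32 and INDEX_op_add_i64. */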

typedef enum {
    TCG_TEMP_UNDEF = 0,
    TCG_TEMP_CONST,
    TCG_TEMP_COPY,
} tcg_temp_state;

struct tcg_temp_info {
    tcg_temp_state state;
    uint16_t prev_copy;
    uint16_t next_copy;
    tcg_target_ulong val;
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];
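
/* temps[] shadows the TCGContext temp array and records what is known
   about each temp at the current point in the opcode stream: undefined,
   a known constant (val), or a copy of other temps.  All copies of one
   value are linked into a circular doubly linked list through
   prev_copy/next_copy, so every known copy of a temp can be enumerated
   (and unlinked) in time proportional to the number of copies. */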

/* Reset TEMP's state to TCG_TEMP_UNDEF.  If TEMP only had one copy, remove
   the copy flag from the left temp. */
static void reset_temp(TCGArg temp)
{
    if (temps[temp].state == TCG_TEMP_COPY) {
        if (temps[temp].prev_copy == temps[temp].next_copy) {
            temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF;
        } else {
            temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
            temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
        }
    }
    temps[temp].state = TCG_TEMP_UNDEF;
}

static int op_bits(TCGOpcode op)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    return def->flags & TCG_OPF_64BIT ? 64 : 32;
}

static TCGOpcode op_to_movi(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
    case 64:
        return INDEX_op_movi_i64;
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
{
    TCGArg i;

    /* If this is already a global, we can't do better. */
    if (temp < s->nb_globals) {
        return temp;
    }

    /* Search for a global first. */
    for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
        if (i < s->nb_globals) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!s->temps[temp].temp_local) {
        for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
            if (s->temps[i].temp_local) {
                return i;
            }
        }
    }

    /* Failure to find a better representation, return the same temp. */
    return temp;
}
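
/* Return true if ARG1 and ARG2 are known to hold the same value, i.e.
   they are the same temp or are linked into the same copy list. */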
static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
{
    TCGArg i;

    if (arg1 == arg2) {
        return true;
    }

    if (temps[arg1].state != TCG_TEMP_COPY
        || temps[arg2].state != TCG_TEMP_COPY) {
        return false;
    }

    for (i = temps[arg1].next_copy; i != arg1; i = temps[i].next_copy) {
        if (i == arg2) {
            return true;
        }
    }

    return false;
}
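
/* Emit "mov dst, src" into GEN_ARGS and record DST as a copy of SRC by
   splicing DST into SRC's circular copy list.  Copies are only recorded
   for temps of the same type; a cross-type mov is still emitted, but
   nothing is remembered about it. */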
static void tcg_opt_gen_mov(TCGContext *s, TCGArg *gen_args,
                            TCGArg dst, TCGArg src)
{
    reset_temp(dst);
    assert(temps[src].state != TCG_TEMP_CONST);

    if (s->temps[src].type == s->temps[dst].type) {
        if (temps[src].state != TCG_TEMP_COPY) {
            temps[src].state = TCG_TEMP_COPY;
            temps[src].next_copy = src;
            temps[src].prev_copy = src;
        }
        temps[dst].state = TCG_TEMP_COPY;
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
    }

    gen_args[0] = dst;
    gen_args[1] = src;
}
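
/* Emit "movi dst, val" into GEN_ARGS and record DST as the constant VAL. */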
static void tcg_opt_gen_movi(TCGArg *gen_args, TCGArg dst, TCGArg val)
{
    reset_temp(dst);
    temps[dst].state = TCG_TEMP_CONST;
    temps[dst].val = val;
    gen_args[0] = dst;
    gen_args[1] = val;
}

static TCGOpcode op_to_mov(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
    case 64:
        return INDEX_op_mov_i64;
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (uint32_t)y;

    case INDEX_op_shl_i64:
        return (uint64_t)x << (uint64_t)y;

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (uint32_t)y;

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (uint64_t)y;

    case INDEX_op_sar_i32:
        return (int32_t)x >> (int32_t)y;

    case INDEX_op_sar_i64:
        return (int64_t)x >> (int64_t)y;

    case INDEX_op_rotr_i32:
        x = ((uint32_t)x << (32 - y)) | ((uint32_t)x >> y);
        return x;

    case INDEX_op_rotr_i64:
        x = ((uint64_t)x << (64 - y)) | ((uint64_t)x >> y);
        return x;

    case INDEX_op_rotl_i32:
        x = ((uint32_t)x << y) | ((uint32_t)x >> (32 - y));
        return x;

    case INDEX_op_rotl_i64:
        x = ((uint64_t)x << y) | ((uint64_t)x >> (64 - y));
        return x;

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
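
/* Fold OP over constants X and Y, truncating the result to the opcode's
   width so a 32-bit operation never leaves stale high bits behind. */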
static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
    if (op_bits(op) == 32) {
        res &= 0xffffffff;
    }
    return res;
}

/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) {
        switch (op_bits(op)) {
        case 32:
            switch (c) {
            case TCG_COND_EQ:
                return (uint32_t)temps[x].val == (uint32_t)temps[y].val;
            case TCG_COND_NE:
                return (uint32_t)temps[x].val != (uint32_t)temps[y].val;
            case TCG_COND_LT:
                return (int32_t)temps[x].val < (int32_t)temps[y].val;
            case TCG_COND_GE:
                return (int32_t)temps[x].val >= (int32_t)temps[y].val;
            case TCG_COND_LE:
                return (int32_t)temps[x].val <= (int32_t)temps[y].val;
            case TCG_COND_GT:
                return (int32_t)temps[x].val > (int32_t)temps[y].val;
            case TCG_COND_LTU:
                return (uint32_t)temps[x].val < (uint32_t)temps[y].val;
            case TCG_COND_GEU:
                return (uint32_t)temps[x].val >= (uint32_t)temps[y].val;
            case TCG_COND_LEU:
                return (uint32_t)temps[x].val <= (uint32_t)temps[y].val;
            case TCG_COND_GTU:
                return (uint32_t)temps[x].val > (uint32_t)temps[y].val;
            }
            break;
        case 64:
            switch (c) {
            case TCG_COND_EQ:
                return (uint64_t)temps[x].val == (uint64_t)temps[y].val;
            case TCG_COND_NE:
                return (uint64_t)temps[x].val != (uint64_t)temps[y].val;
            case TCG_COND_LT:
                return (int64_t)temps[x].val < (int64_t)temps[y].val;
            case TCG_COND_GE:
                return (int64_t)temps[x].val >= (int64_t)temps[y].val;
            case TCG_COND_LE:
                return (int64_t)temps[x].val <= (int64_t)temps[y].val;
            case TCG_COND_GT:
                return (int64_t)temps[x].val > (int64_t)temps[y].val;
            case TCG_COND_LTU:
                return (uint64_t)temps[x].val < (uint64_t)temps[y].val;
            case TCG_COND_GEU:
                return (uint64_t)temps[x].val >= (uint64_t)temps[y].val;
            case TCG_COND_LEU:
                return (uint64_t)temps[x].val <= (uint64_t)temps[y].val;
            case TCG_COND_GTU:
                return (uint64_t)temps[x].val > (uint64_t)temps[y].val;
            }
            break;
        }
    } else if (temps_are_copies(x, y)) {
        switch (c) {
        case TCG_COND_GT:
        case TCG_COND_LTU:
        case TCG_COND_LT:
        case TCG_COND_GTU:
        case TCG_COND_NE:
            return 0;
        case TCG_COND_GE:
        case TCG_COND_GEU:
        case TCG_COND_LE:
        case TCG_COND_LEU:
        case TCG_COND_EQ:
            return 1;
        default:
            return 2;
        }
    } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    } else {
        return 2;
    }

    fprintf(stderr,
            "Unrecognized bitness %d or condition %d in "
            "do_constant_folding_cond.\n", op_bits(op), c);
    tcg_abort();
}
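
/* A small illustrative example of what the pass below does for a
   sequence like (constants written $N, as in TCG dumps):

       movi_i32  t0, $0xff
       and_i32   t1, t2, t0
       mov_i32   t3, t1

   t0 is recorded as the constant 0xff; the and cannot be folded (t2 is
   unknown), but t3 is recorded as a copy of t1, so later uses of t3 can
   be rewritten to t1 (or to a better copy found by find_better_copy). */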

/* Propagate constants and copies, fold constant expressions. */
static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                    TCGArg *args, TCGOpDef *tcg_op_defs)
{
    int i, nb_ops, op_index, nb_temps, nb_globals, nb_call_args;
    TCGOpcode op;
    const TCGOpDef *def;
    TCGArg *gen_args;
    TCGArg tmp;
    TCGCond cond;

    /* Array VALS has an element for each temp.
       If this temp holds a constant then its value is kept in VALS' element.
       If this temp is a copy of other ones then the other copies are
       available through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));

    nb_ops = tcg_opc_ptr - gen_opc_buf;
    gen_args = args;

    for (op_index = 0; op_index < nb_ops; op_index++) {
        op = gen_opc_buf[op_index];
        def = &tcg_op_defs[op];
        /* Do copy propagation */
        if (op == INDEX_op_call) {
            int nb_oargs = args[0] >> 16;
            int nb_iargs = args[0] & 0xffff;
            for (i = nb_oargs + 1; i < nb_oargs + nb_iargs + 1; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = find_better_copy(s, args[i]);
                }
            }
        } else {
            for (i = def->nb_oargs; i < def->nb_oargs + def->nb_iargs; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = find_better_copy(s, args[i]);
                }
            }
        }

        /* For commutative operations make constant second argument */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(and):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
            /* Prefer the constant in second argument, and then the form
               op a, a, b, which is better handled on non-RISC hosts. */
            if (temps[args[1]].state == TCG_TEMP_CONST || (args[0] == args[2]
                && temps[args[2]].state != TCG_TEMP_CONST)) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
            }
            break;
        CASE_OP_32_64(brcond):
            if (temps[args[0]].state == TCG_TEMP_CONST
                && temps[args[1]].state != TCG_TEMP_CONST) {
                tmp = args[0];
                args[0] = args[1];
                args[1] = tmp;
                args[2] = tcg_swap_cond(args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state != TCG_TEMP_CONST) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
                args[3] = tcg_swap_cond(args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            cond = args[5];
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state != TCG_TEMP_CONST) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
                cond = tcg_swap_cond(cond);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation. */
            if (args[0] == args[3]) {
                tmp = args[3];
                args[3] = args[4];
                args[4] = tmp;
                cond = tcg_invert_cond(cond);
            }
            args[5] = cond;
            break;
        default:
            break;
        }
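
        /* For example, "add_i32 t0, $5, t1" has now become
           "add_i32 t0, t1, $5", and "brcond_i32 $0, t0, lt, label" has
           become "brcond_i32 t0, $0, gt, label", so the simplification
           and folding code below only needs to look for a constant in
           the second position. */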

        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0" */
        switch (op) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, 0 => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                /* Proceed with possible constant folding. */
                break;
            }
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                if (temps_are_copies(args[0], args[1])) {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(and):
        CASE_OP_32_64(mul):
            if ((temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0)) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            if (temps_are_copies(args[1], args[2])) {
                if (temps_are_copies(args[0], args[1])) {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(sub):
        CASE_OP_32_64(xor):
            if (temps_are_copies(args[1], args[2])) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                gen_args += 2;
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }
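
        /* Note that the identities above hold for any value of a:
           a - a == 0 and a ^ a == 0, and likewise a | a == a and
           a & a == a, so they fire even when nothing is known about a. */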

        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted to arguments by register
           allocator where needed and possible.  Also detect copies. */
        switch (op) {
        CASE_OP_32_64(mov):
            if (temps_are_copies(args[0], args[1])) {
                args += 2;
                gen_opc_buf[op_index] = INDEX_op_nop;
                break;
            }
            if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                gen_args += 2;
                args += 2;
                break;
            }
            /* Source argument is constant.  Rewrite the operation and
               let movi case handle it. */
            op = op_to_movi(op);
            gen_opc_buf[op_index] = op;
            args[1] = temps[args[1]].val;
            /* fallthrough */
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(gen_args, args[0], args[1]);
            gen_args += 2;
            args += 2;
            break;

        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val, 0);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
            } else {
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
            }
            gen_args += 2;
            args += 2;
            break;

        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
            } else {
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args += 3;
            }
            args += 3;
            break;
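
        /* For deposit, args[3] is the bit offset and args[4] the field
           width: when both inputs are constants, the result keeps args[1]
           outside the field and inserts the low args[4] bits of args[2]
           at that offset. */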
        CASE_OP_32_64(deposit):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = ((1ull << args[4]) - 1);
                tmp = (temps[args[1]].val & ~(tmp << args[3]))
                      | ((temps[args[2]].val & tmp) << args[3]);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
            } else {
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args[3] = args[3];
                gen_args[4] = args[4];
                gen_args += 5;
            }
            args += 5;
            break;

        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[3]);
            if (tmp != 2) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
            } else {
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args[3] = args[3];
                gen_args += 4;
            }
            args += 4;
            break;
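
        /* brcond ends a basic block, so whenever the branch may be
           taken, everything known about the temps must be invalidated;
           only a branch folded away to a nop preserves that knowledge. */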
        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(op, args[0], args[1], args[2]);
            if (tmp != 2) {
                if (tmp) {
                    memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
                    gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[3];
                    gen_args += 1;
                } else {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                }
            } else {
                memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args[3] = args[3];
                gen_args += 4;
            }
            args += 4;
            break;

        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[5]);
            if (tmp != 2) {
                if (temps_are_copies(args[0], args[4-tmp])) {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) {
                    gen_opc_buf[op_index] = op_to_movi(op);
                    tcg_opt_gen_movi(gen_args, args[0],
                                     temps[args[4-tmp]].val);
                    gen_args += 2;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[4-tmp]);
                    gen_args += 2;
                }
            } else {
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args[3] = args[3];
                gen_args[4] = args[4];
                gen_args[5] = args[5];
                gen_args += 6;
            }
            args += 6;
            break;
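
        /* Calls clobber all globals unless flagged const or pure, and
           always clobber their output arguments; the op itself is copied
           through unchanged. */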
        case INDEX_op_call:
            nb_call_args = (args[0] >> 16) + (args[0] & 0xffff);
            if (!(args[nb_call_args + 1] & (TCG_CALL_CONST | TCG_CALL_PURE))) {
                for (i = 0; i < nb_globals; i++) {
                    reset_temp(i);
                }
            }
            for (i = 0; i < (args[0] >> 16); i++) {
                reset_temp(args[i + 1]);
            }
            i = nb_call_args + 3;
            while (i) {
                *gen_args = *args;
                args++;
                gen_args++;
                i--;
            }
            break;

        default:
            /* Default case: we do know nothing about operation so no
               propagation is done.  We trash everything if the operation
               is the end of a basic block, otherwise we only trash the
               output args. */
            if (def->flags & TCG_OPF_BB_END) {
                memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
            } else {
                for (i = 0; i < def->nb_oargs; i++) {
                    reset_temp(args[i]);
                }
            }
            for (i = 0; i < def->nb_args; i++) {
                gen_args[i] = args[i];
            }
            args += def->nb_args;
            gen_args += def->nb_args;
            break;
        }
    }

    return gen_args;
}
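
/* Public entry point: run the constant-folding / copy-propagation pass
   over the opcode buffer in place and return the new end of the argument
   array. */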
TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
                     TCGArg *args, TCGOpDef *tcg_op_defs)
{
    TCGArg *res;
    res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
    return res;
}