1 /*
2 * Optimizations for Tiny Code Generator for QEMU
4 * Copyright (c) 2010 Samsung Electronics.
5 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
26 #include "qemu/osdep.h"
27 #include "qemu/int128.h"
28 #include "tcg/tcg-op.h"
29 #include "tcg-internal.h"
31 #define CASE_OP_32_64(x) \
32 glue(glue(case INDEX_op_, x), _i32): \
33 glue(glue(case INDEX_op_, x), _i64)
35 #define CASE_OP_32_64_VEC(x) \
36 glue(glue(case INDEX_op_, x), _i32): \
37 glue(glue(case INDEX_op_, x), _i64): \
38 glue(glue(case INDEX_op_, x), _vec)
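/*
 * For example, CASE_OP_32_64(add) expands to:
 *     case INDEX_op_add_i32:
 *     case INDEX_op_add_i64:
 * so a single switch arm covers both widths of the same operation.
 */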
40 typedef struct TempOptInfo {
41 bool is_const;
42 TCGTemp *prev_copy;
43 TCGTemp *next_copy;
44 uint64_t val;
45 uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
46 uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
47 } TempOptInfo;
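/*
 * Worked example for the masks above: a constant temp holding 0xff
 * has z_mask == 0xff (every other bit is known zero) and s_mask with
 * the top 55 bits set, since clrsb64(0xff) == 55 redundant sign bits.
 */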
49 typedef struct OptContext {
50 TCGContext *tcg;
51 TCGOp *prev_mb;
52 TCGTempSet temps_used;
54 /* In flight values from optimization. */
55 uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
56 uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
57 uint64_t s_mask; /* mask of clrsb(value) bits */
58 TCGType type;
59 } OptContext;
61 /* Calculate the smask for a specific value. */
62 static uint64_t smask_from_value(uint64_t value)
64 int rep = clrsb64(value);
65 return ~(~0ull >> rep);
69 * Calculate the smask for a given set of known-zeros.
70 * If there are lots of zeros on the left, we can consider the remainder
71 * an unsigned field, and thus the corresponding signed field is one bit
72 * larger.
74 static uint64_t smask_from_zmask(uint64_t zmask)
77 * Only the 0 bits are significant for zmask, thus the msb itself
78 * must be zero, else we have no sign information.
80 int rep = clz64(zmask);
81 if (rep == 0) {
82 return 0;
84 rep -= 1;
85 return ~(~0ull >> rep);
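/*
 * Worked example: zmask == 0xffff means bits 16..63 are known zero,
 * so the value fits in 16 unsigned bits and therefore 17 signed bits,
 * giving clz64(zmask) - 1 == 47 redundant sign bits in the result.
 */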
89 * Recreate a properly left-aligned smask after manipulation.
90 * Some bit-shuffling, particularly shifts and rotates, may
91 * retain sign bits on the left, but may scatter disconnected
92 * sign bits on the right. Retain only what remains to the left.
94 static uint64_t smask_from_smask(int64_t smask)
96 /* Only the 1 bits are significant for smask */
97 return smask_from_zmask(~smask);
100 static inline TempOptInfo *ts_info(TCGTemp *ts)
102 return ts->state_ptr;
105 static inline TempOptInfo *arg_info(TCGArg arg)
107 return ts_info(arg_temp(arg));
110 static inline bool ts_is_const(TCGTemp *ts)
112 return ts_info(ts)->is_const;
115 static inline bool arg_is_const(TCGArg arg)
117 return ts_is_const(arg_temp(arg));
120 static inline bool ts_is_copy(TCGTemp *ts)
122 return ts_info(ts)->next_copy != ts;
125 /* Reset TEMP's state, possibly removing the temp from the list of copies. */
126 static void reset_ts(TCGTemp *ts)
128 TempOptInfo *ti = ts_info(ts);
129 TempOptInfo *pi = ts_info(ti->prev_copy);
130 TempOptInfo *ni = ts_info(ti->next_copy);
132 ni->prev_copy = ti->prev_copy;
133 pi->next_copy = ti->next_copy;
134 ti->next_copy = ts;
135 ti->prev_copy = ts;
136 ti->is_const = false;
137 ti->z_mask = -1;
138 ti->s_mask = 0;
141 static void reset_temp(TCGArg arg)
143 reset_ts(arg_temp(arg));
146 /* Initialize and activate a temporary. */
147 static void init_ts_info(OptContext *ctx, TCGTemp *ts)
149 size_t idx = temp_idx(ts);
150 TempOptInfo *ti;
152 if (test_bit(idx, ctx->temps_used.l)) {
153 return;
155 set_bit(idx, ctx->temps_used.l);
157 ti = ts->state_ptr;
158 if (ti == NULL) {
159 ti = tcg_malloc(sizeof(TempOptInfo));
160 ts->state_ptr = ti;
163 ti->next_copy = ts;
164 ti->prev_copy = ts;
165 if (ts->kind == TEMP_CONST) {
166 ti->is_const = true;
167 ti->val = ts->val;
168 ti->z_mask = ts->val;
169 ti->s_mask = smask_from_value(ts->val);
170 } else {
171 ti->is_const = false;
172 ti->z_mask = -1;
173 ti->s_mask = 0;
177 static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
179 TCGTemp *i, *g, *l;
181 /* If this is already readonly, we can't do better. */
182 if (temp_readonly(ts)) {
183 return ts;
186 g = l = NULL;
187 for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
188 if (temp_readonly(i)) {
189 return i;
190 } else if (i->kind > ts->kind) {
191 if (i->kind == TEMP_GLOBAL) {
192 g = i;
193 } else if (i->kind == TEMP_LOCAL) {
194 l = i;
199 /* If we didn't find a better representation, return the same temp. */
200 return g ? g : l ? l : ts;
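/*
 * The preference order above is: a read-only copy wins outright,
 * then a global, then a local, and only then the original temp.
 */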
203 static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
205 TCGTemp *i;
207 if (ts1 == ts2) {
208 return true;
211 if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
212 return false;
215 for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
216 if (i == ts2) {
217 return true;
221 return false;
224 static bool args_are_copies(TCGArg arg1, TCGArg arg2)
226 return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
229 static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
231 TCGTemp *dst_ts = arg_temp(dst);
232 TCGTemp *src_ts = arg_temp(src);
233 TempOptInfo *di;
234 TempOptInfo *si;
235 TCGOpcode new_op;
237 if (ts_are_copies(dst_ts, src_ts)) {
238 tcg_op_remove(ctx->tcg, op);
239 return true;
242 reset_ts(dst_ts);
243 di = ts_info(dst_ts);
244 si = ts_info(src_ts);
246 switch (ctx->type) {
247 case TCG_TYPE_I32:
248 new_op = INDEX_op_mov_i32;
249 break;
250 case TCG_TYPE_I64:
251 new_op = INDEX_op_mov_i64;
252 break;
253 case TCG_TYPE_V64:
254 case TCG_TYPE_V128:
255 case TCG_TYPE_V256:
256 /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
257 new_op = INDEX_op_mov_vec;
258 break;
259 default:
260 g_assert_not_reached();
262 op->opc = new_op;
263 op->args[0] = dst;
264 op->args[1] = src;
266 di->z_mask = si->z_mask;
267 di->s_mask = si->s_mask;
269 if (src_ts->type == dst_ts->type) {
270 TempOptInfo *ni = ts_info(si->next_copy);
272 di->next_copy = si->next_copy;
273 di->prev_copy = src_ts;
274 ni->prev_copy = dst_ts;
275 si->next_copy = dst_ts;
276 di->is_const = si->is_const;
277 di->val = si->val;
279 return true;
282 static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
283 TCGArg dst, uint64_t val)
285 TCGTemp *tv;
287 if (ctx->type == TCG_TYPE_I32) {
288 val = (int32_t)val;
291 /* Convert movi to mov with constant temp. */
292 tv = tcg_constant_internal(ctx->type, val);
293 init_ts_info(ctx, tv);
294 return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
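/*
 * Note that a 32-bit constant such as 0xffffffff is canonicalized to
 * its sign-extended form (-1) before tcg_constant_internal interns it,
 * so both spellings resolve to the same TEMP_CONST temp.
 */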
297 static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
299 uint64_t l64, h64;
301 switch (op) {
302 CASE_OP_32_64(add):
303 return x + y;
305 CASE_OP_32_64(sub):
306 return x - y;
308 CASE_OP_32_64(mul):
309 return x * y;
311 CASE_OP_32_64_VEC(and):
312 return x & y;
314 CASE_OP_32_64_VEC(or):
315 return x | y;
317 CASE_OP_32_64_VEC(xor):
318 return x ^ y;
320 case INDEX_op_shl_i32:
321 return (uint32_t)x << (y & 31);
323 case INDEX_op_shl_i64:
324 return (uint64_t)x << (y & 63);
326 case INDEX_op_shr_i32:
327 return (uint32_t)x >> (y & 31);
329 case INDEX_op_shr_i64:
330 return (uint64_t)x >> (y & 63);
332 case INDEX_op_sar_i32:
333 return (int32_t)x >> (y & 31);
335 case INDEX_op_sar_i64:
336 return (int64_t)x >> (y & 63);
338 case INDEX_op_rotr_i32:
339 return ror32(x, y & 31);
341 case INDEX_op_rotr_i64:
342 return ror64(x, y & 63);
344 case INDEX_op_rotl_i32:
345 return rol32(x, y & 31);
347 case INDEX_op_rotl_i64:
348 return rol64(x, y & 63);
350 CASE_OP_32_64_VEC(not):
351 return ~x;
353 CASE_OP_32_64(neg):
354 return -x;
356 CASE_OP_32_64_VEC(andc):
357 return x & ~y;
359 CASE_OP_32_64_VEC(orc):
360 return x | ~y;
362 CASE_OP_32_64_VEC(eqv):
363 return ~(x ^ y);
365 CASE_OP_32_64_VEC(nand):
366 return ~(x & y);
368 CASE_OP_32_64_VEC(nor):
369 return ~(x | y);
371 case INDEX_op_clz_i32:
372 return (uint32_t)x ? clz32(x) : y;
374 case INDEX_op_clz_i64:
375 return x ? clz64(x) : y;
377 case INDEX_op_ctz_i32:
378 return (uint32_t)x ? ctz32(x) : y;
380 case INDEX_op_ctz_i64:
381 return x ? ctz64(x) : y;
383 case INDEX_op_ctpop_i32:
384 return ctpop32(x);
386 case INDEX_op_ctpop_i64:
387 return ctpop64(x);
389 CASE_OP_32_64(ext8s):
390 return (int8_t)x;
392 CASE_OP_32_64(ext16s):
393 return (int16_t)x;
395 CASE_OP_32_64(ext8u):
396 return (uint8_t)x;
398 CASE_OP_32_64(ext16u):
399 return (uint16_t)x;
401 CASE_OP_32_64(bswap16):
402 x = bswap16(x);
403 return y & TCG_BSWAP_OS ? (int16_t)x : x;
405 CASE_OP_32_64(bswap32):
406 x = bswap32(x);
407 return y & TCG_BSWAP_OS ? (int32_t)x : x;
409 case INDEX_op_bswap64_i64:
410 return bswap64(x);
412 case INDEX_op_ext_i32_i64:
413 case INDEX_op_ext32s_i64:
414 return (int32_t)x;
416 case INDEX_op_extu_i32_i64:
417 case INDEX_op_extrl_i64_i32:
418 case INDEX_op_ext32u_i64:
419 return (uint32_t)x;
421 case INDEX_op_extrh_i64_i32:
422 return (uint64_t)x >> 32;
424 case INDEX_op_muluh_i32:
425 return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
426 case INDEX_op_mulsh_i32:
427 return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
429 case INDEX_op_muluh_i64:
430 mulu64(&l64, &h64, x, y);
431 return h64;
432 case INDEX_op_mulsh_i64:
433 muls64(&l64, &h64, x, y);
434 return h64;
436 case INDEX_op_div_i32:
437 /* Avoid crashing on divide by zero, otherwise undefined. */
438 return (int32_t)x / ((int32_t)y ? : 1);
439 case INDEX_op_divu_i32:
440 return (uint32_t)x / ((uint32_t)y ? : 1);
441 case INDEX_op_div_i64:
442 return (int64_t)x / ((int64_t)y ? : 1);
443 case INDEX_op_divu_i64:
444 return (uint64_t)x / ((uint64_t)y ? : 1);
446 case INDEX_op_rem_i32:
447 return (int32_t)x % ((int32_t)y ? : 1);
448 case INDEX_op_remu_i32:
449 return (uint32_t)x % ((uint32_t)y ? : 1);
450 case INDEX_op_rem_i64:
451 return (int64_t)x % ((int64_t)y ? : 1);
452 case INDEX_op_remu_i64:
453 return (uint64_t)x % ((uint64_t)y ? : 1);
455 default:
456 fprintf(stderr,
457 "Unrecognized operation %d in do_constant_folding.\n", op);
458 tcg_abort();
462 static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
463 uint64_t x, uint64_t y)
465 uint64_t res = do_constant_folding_2(op, x, y);
466 if (type == TCG_TYPE_I32) {
467 res = (int32_t)res;
469 return res;
472 static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
474 switch (c) {
475 case TCG_COND_EQ:
476 return x == y;
477 case TCG_COND_NE:
478 return x != y;
479 case TCG_COND_LT:
480 return (int32_t)x < (int32_t)y;
481 case TCG_COND_GE:
482 return (int32_t)x >= (int32_t)y;
483 case TCG_COND_LE:
484 return (int32_t)x <= (int32_t)y;
485 case TCG_COND_GT:
486 return (int32_t)x > (int32_t)y;
487 case TCG_COND_LTU:
488 return x < y;
489 case TCG_COND_GEU:
490 return x >= y;
491 case TCG_COND_LEU:
492 return x <= y;
493 case TCG_COND_GTU:
494 return x > y;
495 default:
496 tcg_abort();
500 static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
502 switch (c) {
503 case TCG_COND_EQ:
504 return x == y;
505 case TCG_COND_NE:
506 return x != y;
507 case TCG_COND_LT:
508 return (int64_t)x < (int64_t)y;
509 case TCG_COND_GE:
510 return (int64_t)x >= (int64_t)y;
511 case TCG_COND_LE:
512 return (int64_t)x <= (int64_t)y;
513 case TCG_COND_GT:
514 return (int64_t)x > (int64_t)y;
515 case TCG_COND_LTU:
516 return x < y;
517 case TCG_COND_GEU:
518 return x >= y;
519 case TCG_COND_LEU:
520 return x <= y;
521 case TCG_COND_GTU:
522 return x > y;
523 default:
524 tcg_abort();
528 static bool do_constant_folding_cond_eq(TCGCond c)
530 switch (c) {
531 case TCG_COND_GT:
532 case TCG_COND_LTU:
533 case TCG_COND_LT:
534 case TCG_COND_GTU:
535 case TCG_COND_NE:
536 return 0;
537 case TCG_COND_GE:
538 case TCG_COND_GEU:
539 case TCG_COND_LE:
540 case TCG_COND_LEU:
541 case TCG_COND_EQ:
542 return 1;
543 default:
544 tcg_abort();
549 * Return -1 if the condition can't be simplified,
550 * and the result of the condition (0 or 1) if it can.
552 static int do_constant_folding_cond(TCGType type, TCGArg x,
553 TCGArg y, TCGCond c)
555 if (arg_is_const(x) && arg_is_const(y)) {
556 uint64_t xv = arg_info(x)->val;
557 uint64_t yv = arg_info(y)->val;
559 switch (type) {
560 case TCG_TYPE_I32:
561 return do_constant_folding_cond_32(xv, yv, c);
562 case TCG_TYPE_I64:
563 return do_constant_folding_cond_64(xv, yv, c);
564 default:
565 /* Only scalar comparisons are optimizable */
566 return -1;
568 } else if (args_are_copies(x, y)) {
569 return do_constant_folding_cond_eq(c);
570 } else if (arg_is_const(y) && arg_info(y)->val == 0) {
571 switch (c) {
572 case TCG_COND_LTU:
573 return 0;
574 case TCG_COND_GEU:
575 return 1;
576 default:
577 return -1;
580 return -1;
584 * Return -1 if the condition can't be simplified,
585 * and the result of the condition (0 or 1) if it can.
587 static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
589 TCGArg al = p1[0], ah = p1[1];
590 TCGArg bl = p2[0], bh = p2[1];
592 if (arg_is_const(bl) && arg_is_const(bh)) {
593 tcg_target_ulong blv = arg_info(bl)->val;
594 tcg_target_ulong bhv = arg_info(bh)->val;
595 uint64_t b = deposit64(blv, 32, 32, bhv);
597 if (arg_is_const(al) && arg_is_const(ah)) {
598 tcg_target_ulong alv = arg_info(al)->val;
599 tcg_target_ulong ahv = arg_info(ah)->val;
600 uint64_t a = deposit64(alv, 32, 32, ahv);
601 return do_constant_folding_cond_64(a, b, c);
603 if (b == 0) {
604 switch (c) {
605 case TCG_COND_LTU:
606 return 0;
607 case TCG_COND_GEU:
608 return 1;
609 default:
610 break;
614 if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
615 return do_constant_folding_cond_eq(c);
617 return -1;
621 * swap_commutative:
622 * @dest: TCGArg of the destination argument, or NO_DEST.
623 * @p1: first paired argument
624 * @p2: second paired argument
626 * If *@p1 is a constant and *@p2 is not, swap.
627 * If *@p2 matches @dest, swap.
628 * Return true if a swap was performed.
631 #define NO_DEST temp_arg(NULL)
633 static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
635 TCGArg a1 = *p1, a2 = *p2;
636 int sum = 0;
637 sum += arg_is_const(a1);
638 sum -= arg_is_const(a2);
640 /* Prefer the constant in second argument, and then the form
641 op a, a, b, which is better handled on non-RISC hosts. */
642 if (sum > 0 || (sum == 0 && dest == a2)) {
643 *p1 = a2;
644 *p2 = a1;
645 return true;
647 return false;
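/*
 * Illustration (using informal operand notation): "add t0, $5, t1"
 * has a constant first input and is swapped to "add t0, t1, $5";
 * "add t0, t1, t0" is swapped to "add t0, t0, t1" so that the
 * destination matches the first input.
 */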
650 static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
652 int sum = 0;
653 sum += arg_is_const(p1[0]);
654 sum += arg_is_const(p1[1]);
655 sum -= arg_is_const(p2[0]);
656 sum -= arg_is_const(p2[1]);
657 if (sum > 0) {
658 TCGArg t;
659 t = p1[0], p1[0] = p2[0], p2[0] = t;
660 t = p1[1], p1[1] = p2[1], p2[1] = t;
661 return true;
663 return false;
666 static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
668 for (int i = 0; i < nb_args; i++) {
669 TCGTemp *ts = arg_temp(op->args[i]);
670 init_ts_info(ctx, ts);
674 static void copy_propagate(OptContext *ctx, TCGOp *op,
675 int nb_oargs, int nb_iargs)
677 TCGContext *s = ctx->tcg;
679 for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
680 TCGTemp *ts = arg_temp(op->args[i]);
681 if (ts_is_copy(ts)) {
682 op->args[i] = temp_arg(find_better_copy(s, ts));
687 static void finish_folding(OptContext *ctx, TCGOp *op)
689 const TCGOpDef *def = &tcg_op_defs[op->opc];
690 int i, nb_oargs;
693 * For an opcode that ends a BB, reset all temp data.
694 * We do no cross-BB optimization.
696 if (def->flags & TCG_OPF_BB_END) {
697 memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
698 ctx->prev_mb = NULL;
699 return;
702 nb_oargs = def->nb_oargs;
703 for (i = 0; i < nb_oargs; i++) {
704 TCGTemp *ts = arg_temp(op->args[i]);
705 reset_ts(ts);
707 * Save the corresponding known-zero/sign bits mask for the
708 * first output argument (only one supported so far).
710 if (i == 0) {
711 ts_info(ts)->z_mask = ctx->z_mask;
712 ts_info(ts)->s_mask = ctx->s_mask;
718 * The fold_* functions return true when processing is complete,
719 * usually by folding the operation to a constant or to a copy,
720 * and calling tcg_opt_gen_{mov,movi}. They may do other things,
721 * like collect information about the value produced, for use in
722 * optimizing a subsequent operation.
724 * These first fold_* functions are all helpers, used by other
725 * folders for more specific operations.
728 static bool fold_const1(OptContext *ctx, TCGOp *op)
730 if (arg_is_const(op->args[1])) {
731 uint64_t t;
733 t = arg_info(op->args[1])->val;
734 t = do_constant_folding(op->opc, ctx->type, t, 0);
735 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
737 return false;
740 static bool fold_const2(OptContext *ctx, TCGOp *op)
742 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
743 uint64_t t1 = arg_info(op->args[1])->val;
744 uint64_t t2 = arg_info(op->args[2])->val;
746 t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
747 return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
749 return false;
752 static bool fold_commutative(OptContext *ctx, TCGOp *op)
754 swap_commutative(op->args[0], &op->args[1], &op->args[2]);
755 return false;
758 static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
760 swap_commutative(op->args[0], &op->args[1], &op->args[2]);
761 return fold_const2(ctx, op);
764 static bool fold_masks(OptContext *ctx, TCGOp *op)
766 uint64_t a_mask = ctx->a_mask;
767 uint64_t z_mask = ctx->z_mask;
768 uint64_t s_mask = ctx->s_mask;
771 * 32-bit ops generate 32-bit results, which for the purpose of
772 * simplifying tcg are sign-extended. Certainly that's how we
773 * represent our constants elsewhere. Note that the bits will
774 * be reset properly for a 64-bit value when encountering the
775 * type changing opcodes.
777 if (ctx->type == TCG_TYPE_I32) {
778 a_mask = (int32_t)a_mask;
779 z_mask = (int32_t)z_mask;
780 s_mask |= MAKE_64BIT_MASK(32, 32);
781 ctx->z_mask = z_mask;
782 ctx->s_mask = s_mask;
785 if (z_mask == 0) {
786 return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
788 if (a_mask == 0) {
789 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
791 return false;
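/*
 * In other words: once z_mask is 0, every result bit is known zero and
 * the op folds to movi 0; once a_mask is 0, no bit can differ from the
 * first input and the op folds to a plain mov.
 */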
795 * Convert @op to NOT, if NOT is supported by the host.
796 * Return true if the conversion is successful, which will still
797 * indicate that the processing is complete.
799 static bool fold_not(OptContext *ctx, TCGOp *op);
800 static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
802 TCGOpcode not_op;
803 bool have_not;
805 switch (ctx->type) {
806 case TCG_TYPE_I32:
807 not_op = INDEX_op_not_i32;
808 have_not = TCG_TARGET_HAS_not_i32;
809 break;
810 case TCG_TYPE_I64:
811 not_op = INDEX_op_not_i64;
812 have_not = TCG_TARGET_HAS_not_i64;
813 break;
814 case TCG_TYPE_V64:
815 case TCG_TYPE_V128:
816 case TCG_TYPE_V256:
817 not_op = INDEX_op_not_vec;
818 have_not = TCG_TARGET_HAS_not_vec;
819 break;
820 default:
821 g_assert_not_reached();
823 if (have_not) {
824 op->opc = not_op;
825 op->args[1] = op->args[idx];
826 return fold_not(ctx, op);
828 return false;
831 /* If the binary operation has first argument @i, fold to @i. */
832 static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
834 if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
835 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
837 return false;
840 /* If the binary operation has first argument @i, fold to NOT. */
841 static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
843 if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
844 return fold_to_not(ctx, op, 2);
846 return false;
849 /* If the binary operation has second argument @i, fold to @i. */
850 static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
852 if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
853 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
855 return false;
858 /* If the binary operation has second argument @i, fold to identity. */
859 static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
861 if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
862 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
864 return false;
867 /* If the binary operation has second argument @i, fold to NOT. */
868 static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
870 if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
871 return fold_to_not(ctx, op, 1);
873 return false;
876 /* If the binary operation has both arguments equal, fold to @i. */
877 static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
879 if (args_are_copies(op->args[1], op->args[2])) {
880 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
882 return false;
885 /* If the binary operation has both arguments equal, fold to identity. */
886 static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
888 if (args_are_copies(op->args[1], op->args[2])) {
889 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
891 return false;
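/*
 * Example uses of the helpers above: "sub r, x, x" folds to the
 * constant 0 via fold_xx_to_i, while "and r, x, x" folds to a copy
 * of x via fold_xx_to_x.
 */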
895 * These outermost fold_<op> functions are sorted alphabetically.
897 * The ordering of the transformations should be:
898 * 1) those that produce a constant
899 * 2) those that produce a copy
900 * 3) those that produce information about the result value.
903 static bool fold_add(OptContext *ctx, TCGOp *op)
905 if (fold_const2_commutative(ctx, op) ||
906 fold_xi_to_x(ctx, op, 0)) {
907 return true;
909 return false;
912 /* We cannot as yet do_constant_folding with vectors. */
913 static bool fold_add_vec(OptContext *ctx, TCGOp *op)
915 if (fold_commutative(ctx, op) ||
916 fold_xi_to_x(ctx, op, 0)) {
917 return true;
919 return false;
922 static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
924 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
925 arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
926 uint64_t al = arg_info(op->args[2])->val;
927 uint64_t ah = arg_info(op->args[3])->val;
928 uint64_t bl = arg_info(op->args[4])->val;
929 uint64_t bh = arg_info(op->args[5])->val;
930 TCGArg rl, rh;
931 TCGOp *op2;
933 if (ctx->type == TCG_TYPE_I32) {
934 uint64_t a = deposit64(al, 32, 32, ah);
935 uint64_t b = deposit64(bl, 32, 32, bh);
937 if (add) {
938 a += b;
939 } else {
940 a -= b;
943 al = sextract64(a, 0, 32);
944 ah = sextract64(a, 32, 32);
945 } else {
946 Int128 a = int128_make128(al, ah);
947 Int128 b = int128_make128(bl, bh);
949 if (add) {
950 a = int128_add(a, b);
951 } else {
952 a = int128_sub(a, b);
955 al = int128_getlo(a);
956 ah = int128_gethi(a);
959 rl = op->args[0];
960 rh = op->args[1];
962 /* The proper opcode is supplied by tcg_opt_gen_mov. */
963 op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);
965 tcg_opt_gen_movi(ctx, op, rl, al);
966 tcg_opt_gen_movi(ctx, op2, rh, ah);
967 return true;
969 return false;
972 static bool fold_add2(OptContext *ctx, TCGOp *op)
974 /* Note that the high and low parts may be independently swapped. */
975 swap_commutative(op->args[0], &op->args[2], &op->args[4]);
976 swap_commutative(op->args[1], &op->args[3], &op->args[5]);
978 return fold_addsub2(ctx, op, true);
981 static bool fold_and(OptContext *ctx, TCGOp *op)
983 uint64_t z1, z2;
985 if (fold_const2_commutative(ctx, op) ||
986 fold_xi_to_i(ctx, op, 0) ||
987 fold_xi_to_x(ctx, op, -1) ||
988 fold_xx_to_x(ctx, op)) {
989 return true;
992 z1 = arg_info(op->args[1])->z_mask;
993 z2 = arg_info(op->args[2])->z_mask;
994 ctx->z_mask = z1 & z2;
997 * Sign repetitions are perforce all identical, whether they are 1 or 0.
998 * Bitwise operations preserve the relative quantity of the repetitions.
1000 ctx->s_mask = arg_info(op->args[1])->s_mask
1001 & arg_info(op->args[2])->s_mask;
1004 * Known-zeros does not imply known-ones. Therefore unless
1005 * arg2 is constant, we can't infer affected bits from it.
1007 if (arg_is_const(op->args[2])) {
1008 ctx->a_mask = z1 & ~z2;
1011 return fold_masks(ctx, op);
1014 static bool fold_andc(OptContext *ctx, TCGOp *op)
1016 uint64_t z1;
1018 if (fold_const2(ctx, op) ||
1019 fold_xx_to_i(ctx, op, 0) ||
1020 fold_xi_to_x(ctx, op, 0) ||
1021 fold_ix_to_not(ctx, op, -1)) {
1022 return true;
1025 z1 = arg_info(op->args[1])->z_mask;
1028 * Known-zeros does not imply known-ones. Therefore unless
1029 * arg2 is constant, we can't infer anything from it.
1031 if (arg_is_const(op->args[2])) {
1032 uint64_t z2 = ~arg_info(op->args[2])->z_mask;
1033 ctx->a_mask = z1 & ~z2;
1034 z1 &= z2;
1036 ctx->z_mask = z1;
1038 ctx->s_mask = arg_info(op->args[1])->s_mask
1039 & arg_info(op->args[2])->s_mask;
1040 return fold_masks(ctx, op);
1043 static bool fold_brcond(OptContext *ctx, TCGOp *op)
1045 TCGCond cond = op->args[2];
1046 int i;
1048 if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
1049 op->args[2] = cond = tcg_swap_cond(cond);
1052 i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
1053 if (i == 0) {
1054 tcg_op_remove(ctx->tcg, op);
1055 return true;
1057 if (i > 0) {
1058 op->opc = INDEX_op_br;
1059 op->args[0] = op->args[3];
1061 return false;
1064 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
1066 TCGCond cond = op->args[4];
1067 TCGArg label = op->args[5];
1068 int i, inv = 0;
1070 if (swap_commutative2(&op->args[0], &op->args[2])) {
1071 op->args[4] = cond = tcg_swap_cond(cond);
1074 i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
1075 if (i >= 0) {
1076 goto do_brcond_const;
1079 switch (cond) {
1080 case TCG_COND_LT:
1081 case TCG_COND_GE:
1083 * Simplify LT/GE comparisons vs zero to a single compare
1084 * vs the high word of the input.
1086 if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
1087 arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
1088 goto do_brcond_high;
1090 break;
1092 case TCG_COND_NE:
1093 inv = 1;
1094 QEMU_FALLTHROUGH;
1095 case TCG_COND_EQ:
1097 * Simplify EQ/NE comparisons where one of the pairs
1098 * can be simplified.
1100 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
1101 op->args[2], cond);
1102 switch (i ^ inv) {
1103 case 0:
1104 goto do_brcond_const;
1105 case 1:
1106 goto do_brcond_high;
1109 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
1110 op->args[3], cond);
1111 switch (i ^ inv) {
1112 case 0:
1113 goto do_brcond_const;
1114 case 1:
1115 op->opc = INDEX_op_brcond_i32;
1116 op->args[1] = op->args[2];
1117 op->args[2] = cond;
1118 op->args[3] = label;
1119 break;
1121 break;
1123 default:
1124 break;
1126 do_brcond_high:
1127 op->opc = INDEX_op_brcond_i32;
1128 op->args[0] = op->args[1];
1129 op->args[1] = op->args[3];
1130 op->args[2] = cond;
1131 op->args[3] = label;
1132 break;
1134 do_brcond_const:
1135 if (i == 0) {
1136 tcg_op_remove(ctx->tcg, op);
1137 return true;
1139 op->opc = INDEX_op_br;
1140 op->args[0] = label;
1141 break;
1143 return false;
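/*
 * Example of the do_brcond_high path: "brcond2 lo, hi, 0, 0, lt, L"
 * depends only on the sign of the high word, so it is rewritten as
 * "brcond_i32 hi, 0, lt, L".
 */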
1146 static bool fold_bswap(OptContext *ctx, TCGOp *op)
1148 uint64_t z_mask, s_mask, sign;
1150 if (arg_is_const(op->args[1])) {
1151 uint64_t t = arg_info(op->args[1])->val;
1153 t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
1154 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1157 z_mask = arg_info(op->args[1])->z_mask;
1159 switch (op->opc) {
1160 case INDEX_op_bswap16_i32:
1161 case INDEX_op_bswap16_i64:
1162 z_mask = bswap16(z_mask);
1163 sign = INT16_MIN;
1164 break;
1165 case INDEX_op_bswap32_i32:
1166 case INDEX_op_bswap32_i64:
1167 z_mask = bswap32(z_mask);
1168 sign = INT32_MIN;
1169 break;
1170 case INDEX_op_bswap64_i64:
1171 z_mask = bswap64(z_mask);
1172 sign = INT64_MIN;
1173 break;
1174 default:
1175 g_assert_not_reached();
1177 s_mask = smask_from_zmask(z_mask);
1179 switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
1180 case TCG_BSWAP_OZ:
1181 break;
1182 case TCG_BSWAP_OS:
1183 /* If the sign bit may be 1, force all the bits above to 1. */
1184 if (z_mask & sign) {
1185 z_mask |= sign;
1186 s_mask = sign << 1;
1188 break;
1189 default:
1190 /* The high bits are undefined: force all bits above the sign to 1. */
1191 z_mask |= sign << 1;
1192 s_mask = 0;
1193 break;
1195 ctx->z_mask = z_mask;
1196 ctx->s_mask = s_mask;
1198 return fold_masks(ctx, op);
1201 static bool fold_call(OptContext *ctx, TCGOp *op)
1203 TCGContext *s = ctx->tcg;
1204 int nb_oargs = TCGOP_CALLO(op);
1205 int nb_iargs = TCGOP_CALLI(op);
1206 int flags, i;
1208 init_arguments(ctx, op, nb_oargs + nb_iargs);
1209 copy_propagate(ctx, op, nb_oargs, nb_iargs);
1211 /* If the function reads or writes globals, reset temp data. */
1212 flags = tcg_call_flags(op);
1213 if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1214 int nb_globals = s->nb_globals;
1216 for (i = 0; i < nb_globals; i++) {
1217 if (test_bit(i, ctx->temps_used.l)) {
1218 reset_ts(&ctx->tcg->temps[i]);
1223 /* Reset temp data for outputs. */
1224 for (i = 0; i < nb_oargs; i++) {
1225 reset_temp(op->args[i]);
1228 /* Stop optimizing MB across calls. */
1229 ctx->prev_mb = NULL;
1230 return true;
1233 static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
1235 uint64_t z_mask;
1237 if (arg_is_const(op->args[1])) {
1238 uint64_t t = arg_info(op->args[1])->val;
1240 if (t != 0) {
1241 t = do_constant_folding(op->opc, ctx->type, t, 0);
1242 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1244 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1247 switch (ctx->type) {
1248 case TCG_TYPE_I32:
1249 z_mask = 31;
1250 break;
1251 case TCG_TYPE_I64:
1252 z_mask = 63;
1253 break;
1254 default:
1255 g_assert_not_reached();
1257 ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
1258 ctx->s_mask = smask_from_zmask(ctx->z_mask);
1259 return false;
1262 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
1264 if (fold_const1(ctx, op)) {
1265 return true;
1268 switch (ctx->type) {
1269 case TCG_TYPE_I32:
1270 ctx->z_mask = 32 | 31;
1271 break;
1272 case TCG_TYPE_I64:
1273 ctx->z_mask = 64 | 63;
1274 break;
1275 default:
1276 g_assert_not_reached();
1278 ctx->s_mask = smask_from_zmask(ctx->z_mask);
1279 return false;
1282 static bool fold_deposit(OptContext *ctx, TCGOp *op)
1284 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1285 uint64_t t1 = arg_info(op->args[1])->val;
1286 uint64_t t2 = arg_info(op->args[2])->val;
1288 t1 = deposit64(t1, op->args[3], op->args[4], t2);
1289 return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
1292 ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
1293 op->args[3], op->args[4],
1294 arg_info(op->args[2])->z_mask);
1295 return false;
1298 static bool fold_divide(OptContext *ctx, TCGOp *op)
1300 if (fold_const2(ctx, op) ||
1301 fold_xi_to_x(ctx, op, 1)) {
1302 return true;
1304 return false;
1307 static bool fold_dup(OptContext *ctx, TCGOp *op)
1309 if (arg_is_const(op->args[1])) {
1310 uint64_t t = arg_info(op->args[1])->val;
1311 t = dup_const(TCGOP_VECE(op), t);
1312 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1314 return false;
1317 static bool fold_dup2(OptContext *ctx, TCGOp *op)
1319 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1320 uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
1321 arg_info(op->args[2])->val);
1322 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1325 if (args_are_copies(op->args[1], op->args[2])) {
1326 op->opc = INDEX_op_dup_vec;
1327 TCGOP_VECE(op) = MO_32;
1329 return false;
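/*
 * Example: "dup2_vec v, x, x" is rewritten above as "dup_vec v, x"
 * with a 32-bit element size, since both halves hold the same value.
 */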
1332 static bool fold_eqv(OptContext *ctx, TCGOp *op)
1334 if (fold_const2_commutative(ctx, op) ||
1335 fold_xi_to_x(ctx, op, -1) ||
1336 fold_xi_to_not(ctx, op, 0)) {
1337 return true;
1340 ctx->s_mask = arg_info(op->args[1])->s_mask
1341 & arg_info(op->args[2])->s_mask;
1342 return false;
1345 static bool fold_extract(OptContext *ctx, TCGOp *op)
1347 uint64_t z_mask_old, z_mask;
1348 int pos = op->args[2];
1349 int len = op->args[3];
1351 if (arg_is_const(op->args[1])) {
1352 uint64_t t;
1354 t = arg_info(op->args[1])->val;
1355 t = extract64(t, pos, len);
1356 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1359 z_mask_old = arg_info(op->args[1])->z_mask;
1360 z_mask = extract64(z_mask_old, pos, len);
1361 if (pos == 0) {
1362 ctx->a_mask = z_mask_old ^ z_mask;
1364 ctx->z_mask = z_mask;
1365 ctx->s_mask = smask_from_zmask(z_mask);
1367 return fold_masks(ctx, op);
1370 static bool fold_extract2(OptContext *ctx, TCGOp *op)
1372 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1373 uint64_t v1 = arg_info(op->args[1])->val;
1374 uint64_t v2 = arg_info(op->args[2])->val;
1375 int shr = op->args[3];
1377 if (op->opc == INDEX_op_extract2_i64) {
1378 v1 >>= shr;
1379 v2 <<= 64 - shr;
1380 } else {
1381 v1 = (uint32_t)v1 >> shr;
1382 v2 = (uint64_t)((int32_t)v2 << (32 - shr));
1384 return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
1386 return false;
1389 static bool fold_exts(OptContext *ctx, TCGOp *op)
1391 uint64_t s_mask_old, s_mask, z_mask, sign;
1392 bool type_change = false;
1394 if (fold_const1(ctx, op)) {
1395 return true;
1398 z_mask = arg_info(op->args[1])->z_mask;
1399 s_mask = arg_info(op->args[1])->s_mask;
1400 s_mask_old = s_mask;
1402 switch (op->opc) {
1403 CASE_OP_32_64(ext8s):
1404 sign = INT8_MIN;
1405 z_mask = (uint8_t)z_mask;
1406 break;
1407 CASE_OP_32_64(ext16s):
1408 sign = INT16_MIN;
1409 z_mask = (uint16_t)z_mask;
1410 break;
1411 case INDEX_op_ext_i32_i64:
1412 type_change = true;
1413 QEMU_FALLTHROUGH;
1414 case INDEX_op_ext32s_i64:
1415 sign = INT32_MIN;
1416 z_mask = (uint32_t)z_mask;
1417 break;
1418 default:
1419 g_assert_not_reached();
1422 if (z_mask & sign) {
1423 z_mask |= sign;
1425 s_mask |= sign << 1;
1427 ctx->z_mask = z_mask;
1428 ctx->s_mask = s_mask;
1429 if (!type_change) {
1430 ctx->a_mask = s_mask & ~s_mask_old;
1433 return fold_masks(ctx, op);
1436 static bool fold_extu(OptContext *ctx, TCGOp *op)
1438 uint64_t z_mask_old, z_mask;
1439 bool type_change = false;
1441 if (fold_const1(ctx, op)) {
1442 return true;
1445 z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
1447 switch (op->opc) {
1448 CASE_OP_32_64(ext8u):
1449 z_mask = (uint8_t)z_mask;
1450 break;
1451 CASE_OP_32_64(ext16u):
1452 z_mask = (uint16_t)z_mask;
1453 break;
1454 case INDEX_op_extrl_i64_i32:
1455 case INDEX_op_extu_i32_i64:
1456 type_change = true;
1457 QEMU_FALLTHROUGH;
1458 case INDEX_op_ext32u_i64:
1459 z_mask = (uint32_t)z_mask;
1460 break;
1461 case INDEX_op_extrh_i64_i32:
1462 type_change = true;
1463 z_mask >>= 32;
1464 break;
1465 default:
1466 g_assert_not_reached();
1469 ctx->z_mask = z_mask;
1470 ctx->s_mask = smask_from_zmask(z_mask);
1471 if (!type_change) {
1472 ctx->a_mask = z_mask_old ^ z_mask;
1474 return fold_masks(ctx, op);
1477 static bool fold_mb(OptContext *ctx, TCGOp *op)
1479 /* Eliminate duplicate and redundant fence instructions. */
1480 if (ctx->prev_mb) {
1482 * Merge two barriers of the same type into one,
1483 * or a weaker barrier into a stronger one,
1484 * or two weaker barriers into a stronger one.
1485 * mb X; mb Y => mb X|Y
1486 * mb; strl => mb; st
1487 * ldaq; mb => ld; mb
1488 * ldaq; strl => ld; mb; st
1489 * Other combinations are also merged into a strong
1490 * barrier. This is stricter than specified but for
1491 * the purposes of TCG is better than not optimizing.
1493 ctx->prev_mb->args[0] |= op->args[0];
1494 tcg_op_remove(ctx->tcg, op);
1495 } else {
1496 ctx->prev_mb = op;
1498 return true;
1501 static bool fold_mov(OptContext *ctx, TCGOp *op)
1503 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1506 static bool fold_movcond(OptContext *ctx, TCGOp *op)
1508 TCGCond cond = op->args[5];
1509 int i;
1511 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1512 op->args[5] = cond = tcg_swap_cond(cond);
1515 * Canonicalize the "false" input reg to match the destination reg so
1516 * that the tcg backend can implement a "move if true" operation.
1518 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
1519 op->args[5] = cond = tcg_invert_cond(cond);
1522 i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
1523 if (i >= 0) {
1524 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
1527 ctx->z_mask = arg_info(op->args[3])->z_mask
1528 | arg_info(op->args[4])->z_mask;
1529 ctx->s_mask = arg_info(op->args[3])->s_mask
1530 & arg_info(op->args[4])->s_mask;
1532 if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
1533 uint64_t tv = arg_info(op->args[3])->val;
1534 uint64_t fv = arg_info(op->args[4])->val;
1535 TCGOpcode opc;
1537 switch (ctx->type) {
1538 case TCG_TYPE_I32:
1539 opc = INDEX_op_setcond_i32;
1540 break;
1541 case TCG_TYPE_I64:
1542 opc = INDEX_op_setcond_i64;
1543 break;
1544 default:
1545 g_assert_not_reached();
1548 if (tv == 1 && fv == 0) {
1549 op->opc = opc;
1550 op->args[3] = cond;
1551 } else if (fv == 1 && tv == 0) {
1552 op->opc = opc;
1553 op->args[3] = tcg_invert_cond(cond);
1556 return false;
1559 static bool fold_mul(OptContext *ctx, TCGOp *op)
1561 if (fold_const2(ctx, op) ||
1562 fold_xi_to_i(ctx, op, 0) ||
1563 fold_xi_to_x(ctx, op, 1)) {
1564 return true;
1566 return false;
1569 static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
1571 if (fold_const2_commutative(ctx, op) ||
1572 fold_xi_to_i(ctx, op, 0)) {
1573 return true;
1575 return false;
1578 static bool fold_multiply2(OptContext *ctx, TCGOp *op)
1580 swap_commutative(op->args[0], &op->args[2], &op->args[3]);
1582 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
1583 uint64_t a = arg_info(op->args[2])->val;
1584 uint64_t b = arg_info(op->args[3])->val;
1585 uint64_t h, l;
1586 TCGArg rl, rh;
1587 TCGOp *op2;
1589 switch (op->opc) {
1590 case INDEX_op_mulu2_i32:
1591 l = (uint64_t)(uint32_t)a * (uint32_t)b;
1592 h = (int32_t)(l >> 32);
1593 l = (int32_t)l;
1594 break;
1595 case INDEX_op_muls2_i32:
1596 l = (int64_t)(int32_t)a * (int32_t)b;
1597 h = l >> 32;
1598 l = (int32_t)l;
1599 break;
1600 case INDEX_op_mulu2_i64:
1601 mulu64(&l, &h, a, b);
1602 break;
1603 case INDEX_op_muls2_i64:
1604 muls64(&l, &h, a, b);
1605 break;
1606 default:
1607 g_assert_not_reached();
1610 rl = op->args[0];
1611 rh = op->args[1];
1613 /* The proper opcode is supplied by tcg_opt_gen_mov. */
1614 op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);
1616 tcg_opt_gen_movi(ctx, op, rl, l);
1617 tcg_opt_gen_movi(ctx, op2, rh, h);
1618 return true;
1620 return false;
1623 static bool fold_nand(OptContext *ctx, TCGOp *op)
1625 if (fold_const2_commutative(ctx, op) ||
1626 fold_xi_to_not(ctx, op, -1)) {
1627 return true;
1630 ctx->s_mask = arg_info(op->args[1])->s_mask
1631 & arg_info(op->args[2])->s_mask;
1632 return false;
1635 static bool fold_neg(OptContext *ctx, TCGOp *op)
1637 uint64_t z_mask;
1639 if (fold_const1(ctx, op)) {
1640 return true;
1643 /* Set to 1 all bits to the left of the rightmost. */
1644 z_mask = arg_info(op->args[1])->z_mask;
1645 ctx->z_mask = -(z_mask & -z_mask);
1648 * Because of fold_sub_to_neg, we want to always return true,
1649 * via finish_folding.
1651 finish_folding(ctx, op);
1652 return true;
1655 static bool fold_nor(OptContext *ctx, TCGOp *op)
1657 if (fold_const2_commutative(ctx, op) ||
1658 fold_xi_to_not(ctx, op, 0)) {
1659 return true;
1662 ctx->s_mask = arg_info(op->args[1])->s_mask
1663 & arg_info(op->args[2])->s_mask;
1664 return false;
1667 static bool fold_not(OptContext *ctx, TCGOp *op)
1669 if (fold_const1(ctx, op)) {
1670 return true;
1673 ctx->s_mask = arg_info(op->args[1])->s_mask;
1675 /* Because of fold_to_not, we want to always return true, via finish. */
1676 finish_folding(ctx, op);
1677 return true;
1680 static bool fold_or(OptContext *ctx, TCGOp *op)
1682 if (fold_const2_commutative(ctx, op) ||
1683 fold_xi_to_x(ctx, op, 0) ||
1684 fold_xx_to_x(ctx, op)) {
1685 return true;
1688 ctx->z_mask = arg_info(op->args[1])->z_mask
1689 | arg_info(op->args[2])->z_mask;
1690 ctx->s_mask = arg_info(op->args[1])->s_mask
1691 & arg_info(op->args[2])->s_mask;
1692 return fold_masks(ctx, op);
1695 static bool fold_orc(OptContext *ctx, TCGOp *op)
1697 if (fold_const2(ctx, op) ||
1698 fold_xx_to_i(ctx, op, -1) ||
1699 fold_xi_to_x(ctx, op, -1) ||
1700 fold_ix_to_not(ctx, op, 0)) {
1701 return true;
1704 ctx->s_mask = arg_info(op->args[1])->s_mask
1705 & arg_info(op->args[2])->s_mask;
1706 return false;
1709 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
1711 const TCGOpDef *def = &tcg_op_defs[op->opc];
1712 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
1713 MemOp mop = get_memop(oi);
1714 int width = 8 * memop_size(mop);
1716 if (width < 64) {
1717 ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
1718 if (!(mop & MO_SIGN)) {
1719 ctx->z_mask = MAKE_64BIT_MASK(0, width);
1720 ctx->s_mask <<= 1;
1724 /* Opcodes that touch guest memory stop the mb optimization. */
1725 ctx->prev_mb = NULL;
1726 return false;
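/*
 * Example: an 8-bit unsigned guest load has width 8, so z_mask becomes
 * 0xff and s_mask marks bits 9..63 as redundant sign bits.
 */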
1729 static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
1731 /* Opcodes that touch guest memory stop the mb optimization. */
1732 ctx->prev_mb = NULL;
1733 return false;
1736 static bool fold_remainder(OptContext *ctx, TCGOp *op)
1738 if (fold_const2(ctx, op) ||
1739 fold_xx_to_i(ctx, op, 0)) {
1740 return true;
1742 return false;
1745 static bool fold_setcond(OptContext *ctx, TCGOp *op)
1747 TCGCond cond = op->args[3];
1748 int i;
1750 if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
1751 op->args[3] = cond = tcg_swap_cond(cond);
1754 i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
1755 if (i >= 0) {
1756 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
1759 ctx->z_mask = 1;
1760 ctx->s_mask = smask_from_zmask(1);
1761 return false;
1764 static bool fold_setcond2(OptContext *ctx, TCGOp *op)
1766 TCGCond cond = op->args[5];
1767 int i, inv = 0;
1769 if (swap_commutative2(&op->args[1], &op->args[3])) {
1770 op->args[5] = cond = tcg_swap_cond(cond);
1773 i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
1774 if (i >= 0) {
1775 goto do_setcond_const;
1778 switch (cond) {
1779 case TCG_COND_LT:
1780 case TCG_COND_GE:
1782 * Simplify LT/GE comparisons vs zero to a single compare
1783 * vs the high word of the input.
1785 if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
1786 arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
1787 goto do_setcond_high;
1789 break;
1791 case TCG_COND_NE:
1792 inv = 1;
1793 QEMU_FALLTHROUGH;
1794 case TCG_COND_EQ:
1796 * Simplify EQ/NE comparisons where one of the pairs
1797 * can be simplified.
1799 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
1800 op->args[3], cond);
1801 switch (i ^ inv) {
1802 case 0:
1803 goto do_setcond_const;
1804 case 1:
1805 goto do_setcond_high;
1808 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
1809 op->args[4], cond);
1810 switch (i ^ inv) {
1811 case 0:
1812 goto do_setcond_const;
1813 case 1:
1814 op->args[2] = op->args[3];
1815 op->args[3] = cond;
1816 op->opc = INDEX_op_setcond_i32;
1817 break;
1819 break;
1821 default:
1822 break;
1824 do_setcond_high:
1825 op->args[1] = op->args[2];
1826 op->args[2] = op->args[4];
1827 op->args[3] = cond;
1828 op->opc = INDEX_op_setcond_i32;
1829 break;
1832 ctx->z_mask = 1;
1833 ctx->s_mask = smask_from_zmask(1);
1834 return false;
1836 do_setcond_const:
1837 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
1840 static bool fold_sextract(OptContext *ctx, TCGOp *op)
1842 uint64_t z_mask, s_mask, s_mask_old;
1843 int pos = op->args[2];
1844 int len = op->args[3];
1846 if (arg_is_const(op->args[1])) {
1847 uint64_t t;
1849 t = arg_info(op->args[1])->val;
1850 t = sextract64(t, pos, len);
1851 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1854 z_mask = arg_info(op->args[1])->z_mask;
1855 z_mask = sextract64(z_mask, pos, len);
1856 ctx->z_mask = z_mask;
1858 s_mask_old = arg_info(op->args[1])->s_mask;
1859 s_mask = sextract64(s_mask_old, pos, len);
1860 s_mask |= MAKE_64BIT_MASK(len, 64 - len);
1861 ctx->s_mask = s_mask;
1863 if (pos == 0) {
1864 ctx->a_mask = s_mask & ~s_mask_old;
1867 return fold_masks(ctx, op);
1870 static bool fold_shift(OptContext *ctx, TCGOp *op)
1872 uint64_t s_mask, z_mask, sign;
1874 if (fold_const2(ctx, op) ||
1875 fold_ix_to_i(ctx, op, 0) ||
1876 fold_xi_to_x(ctx, op, 0)) {
1877 return true;
1880 s_mask = arg_info(op->args[1])->s_mask;
1881 z_mask = arg_info(op->args[1])->z_mask;
1883 if (arg_is_const(op->args[2])) {
1884 int sh = arg_info(op->args[2])->val;
1886 ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
1888 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
1889 ctx->s_mask = smask_from_smask(s_mask);
1891 return fold_masks(ctx, op);
1894 switch (op->opc) {
1895 CASE_OP_32_64(sar):
1897 * Arithmetic right shift will not reduce the number of
1898 * input sign repetitions.
1900 ctx->s_mask = s_mask;
1901 break;
1902 CASE_OP_32_64(shr):
1904 * If the sign bit is known zero, then logical right shift
1905 * will not reduce the number of input sign repetitions.
1907 sign = (s_mask & -s_mask) >> 1;
1908 if (!(z_mask & sign)) {
1909 ctx->s_mask = s_mask;
1911 break;
1912 default:
1913 break;
1916 return false;
1919 static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
1921 TCGOpcode neg_op;
1922 bool have_neg;
1924 if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
1925 return false;
1928 switch (ctx->type) {
1929 case TCG_TYPE_I32:
1930 neg_op = INDEX_op_neg_i32;
1931 have_neg = TCG_TARGET_HAS_neg_i32;
1932 break;
1933 case TCG_TYPE_I64:
1934 neg_op = INDEX_op_neg_i64;
1935 have_neg = TCG_TARGET_HAS_neg_i64;
1936 break;
1937 case TCG_TYPE_V64:
1938 case TCG_TYPE_V128:
1939 case TCG_TYPE_V256:
1940 neg_op = INDEX_op_neg_vec;
1941 have_neg = (TCG_TARGET_HAS_neg_vec &&
1942 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
1943 break;
1944 default:
1945 g_assert_not_reached();
1947 if (have_neg) {
1948 op->opc = neg_op;
1949 op->args[1] = op->args[2];
1950 return fold_neg(ctx, op);
1952 return false;
1955 /* We cannot as yet do_constant_folding with vectors. */
1956 static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
1958 if (fold_xx_to_i(ctx, op, 0) ||
1959 fold_xi_to_x(ctx, op, 0) ||
1960 fold_sub_to_neg(ctx, op)) {
1961 return true;
1963 return false;
1966 static bool fold_sub(OptContext *ctx, TCGOp *op)
1968 return fold_const2(ctx, op) || fold_sub_vec(ctx, op);
1971 static bool fold_sub2(OptContext *ctx, TCGOp *op)
1973 return fold_addsub2(ctx, op, false);
1976 static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
1978 /* We can't do any folding with a load, but we can record bits. */
1979 switch (op->opc) {
1980 CASE_OP_32_64(ld8s):
1981 ctx->s_mask = MAKE_64BIT_MASK(8, 56);
1982 break;
1983 CASE_OP_32_64(ld8u):
1984 ctx->z_mask = MAKE_64BIT_MASK(0, 8);
1985 ctx->s_mask = MAKE_64BIT_MASK(9, 55);
1986 break;
1987 CASE_OP_32_64(ld16s):
1988 ctx->s_mask = MAKE_64BIT_MASK(16, 48);
1989 break;
1990 CASE_OP_32_64(ld16u):
1991 ctx->z_mask = MAKE_64BIT_MASK(0, 16);
1992 ctx->s_mask = MAKE_64BIT_MASK(17, 47);
1993 break;
1994 case INDEX_op_ld32s_i64:
1995 ctx->s_mask = MAKE_64BIT_MASK(32, 32);
1996 break;
1997 case INDEX_op_ld32u_i64:
1998 ctx->z_mask = MAKE_64BIT_MASK(0, 32);
1999 ctx->s_mask = MAKE_64BIT_MASK(33, 31);
2000 break;
2001 default:
2002 g_assert_not_reached();
2004 return false;
2007 static bool fold_xor(OptContext *ctx, TCGOp *op)
2009 if (fold_const2_commutative(ctx, op) ||
2010 fold_xx_to_i(ctx, op, 0) ||
2011 fold_xi_to_x(ctx, op, 0) ||
2012 fold_xi_to_not(ctx, op, -1)) {
2013 return true;
2016 ctx->z_mask = arg_info(op->args[1])->z_mask
2017 | arg_info(op->args[2])->z_mask;
2018 ctx->s_mask = arg_info(op->args[1])->s_mask
2019 & arg_info(op->args[2])->s_mask;
2020 return fold_masks(ctx, op);
2023 /* Propagate constants and copies, fold constant expressions. */
2024 void tcg_optimize(TCGContext *s)
2026 int nb_temps, i;
2027 TCGOp *op, *op_next;
2028 OptContext ctx = { .tcg = s };
2030 /* Each temp has a TempOptInfo, reachable via its state_ptr.
2031 If the temp holds a constant then its value is kept there.
2032 If the temp is a copy of other ones then the other copies are
2033 available through the doubly linked circular list. */
2035 nb_temps = s->nb_temps;
2036 for (i = 0; i < nb_temps; ++i) {
2037 s->temps[i].state_ptr = NULL;
2040 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2041 TCGOpcode opc = op->opc;
2042 const TCGOpDef *def;
2043 bool done = false;
2045 /* Calls are special. */
2046 if (opc == INDEX_op_call) {
2047 fold_call(&ctx, op);
2048 continue;
2051 def = &tcg_op_defs[opc];
2052 init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
2053 copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
2055 /* Pre-compute the type of the operation. */
2056 if (def->flags & TCG_OPF_VECTOR) {
2057 ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
2058 } else if (def->flags & TCG_OPF_64BIT) {
2059 ctx.type = TCG_TYPE_I64;
2060 } else {
2061 ctx.type = TCG_TYPE_I32;
2064 /* Assume all bits affected, no bits known zero, no sign reps. */
2065 ctx.a_mask = -1;
2066 ctx.z_mask = -1;
2067 ctx.s_mask = 0;
2070 * Process each opcode.
2071 * Sorted alphabetically by opcode as much as possible.
2073 switch (opc) {
2074 CASE_OP_32_64(add):
2075 done = fold_add(&ctx, op);
2076 break;
2077 case INDEX_op_add_vec:
2078 done = fold_add_vec(&ctx, op);
2079 break;
2080 CASE_OP_32_64(add2):
2081 done = fold_add2(&ctx, op);
2082 break;
2083 CASE_OP_32_64_VEC(and):
2084 done = fold_and(&ctx, op);
2085 break;
2086 CASE_OP_32_64_VEC(andc):
2087 done = fold_andc(&ctx, op);
2088 break;
2089 CASE_OP_32_64(brcond):
2090 done = fold_brcond(&ctx, op);
2091 break;
2092 case INDEX_op_brcond2_i32:
2093 done = fold_brcond2(&ctx, op);
2094 break;
2095 CASE_OP_32_64(bswap16):
2096 CASE_OP_32_64(bswap32):
2097 case INDEX_op_bswap64_i64:
2098 done = fold_bswap(&ctx, op);
2099 break;
2100 CASE_OP_32_64(clz):
2101 CASE_OP_32_64(ctz):
2102 done = fold_count_zeros(&ctx, op);
2103 break;
2104 CASE_OP_32_64(ctpop):
2105 done = fold_ctpop(&ctx, op);
2106 break;
2107 CASE_OP_32_64(deposit):
2108 done = fold_deposit(&ctx, op);
2109 break;
2110 CASE_OP_32_64(div):
2111 CASE_OP_32_64(divu):
2112 done = fold_divide(&ctx, op);
2113 break;
2114 case INDEX_op_dup_vec:
2115 done = fold_dup(&ctx, op);
2116 break;
2117 case INDEX_op_dup2_vec:
2118 done = fold_dup2(&ctx, op);
2119 break;
2120 CASE_OP_32_64_VEC(eqv):
2121 done = fold_eqv(&ctx, op);
2122 break;
2123 CASE_OP_32_64(extract):
2124 done = fold_extract(&ctx, op);
2125 break;
2126 CASE_OP_32_64(extract2):
2127 done = fold_extract2(&ctx, op);
2128 break;
2129 CASE_OP_32_64(ext8s):
2130 CASE_OP_32_64(ext16s):
2131 case INDEX_op_ext32s_i64:
2132 case INDEX_op_ext_i32_i64:
2133 done = fold_exts(&ctx, op);
2134 break;
2135 CASE_OP_32_64(ext8u):
2136 CASE_OP_32_64(ext16u):
2137 case INDEX_op_ext32u_i64:
2138 case INDEX_op_extu_i32_i64:
2139 case INDEX_op_extrl_i64_i32:
2140 case INDEX_op_extrh_i64_i32:
2141 done = fold_extu(&ctx, op);
2142 break;
2143 CASE_OP_32_64(ld8s):
2144 CASE_OP_32_64(ld8u):
2145 CASE_OP_32_64(ld16s):
2146 CASE_OP_32_64(ld16u):
2147 case INDEX_op_ld32s_i64:
2148 case INDEX_op_ld32u_i64:
2149 done = fold_tcg_ld(&ctx, op);
2150 break;
2151 case INDEX_op_mb:
2152 done = fold_mb(&ctx, op);
2153 break;
2154 CASE_OP_32_64_VEC(mov):
2155 done = fold_mov(&ctx, op);
2156 break;
2157 CASE_OP_32_64(movcond):
2158 done = fold_movcond(&ctx, op);
2159 break;
2160 CASE_OP_32_64(mul):
2161 done = fold_mul(&ctx, op);
2162 break;
2163 CASE_OP_32_64(mulsh):
2164 CASE_OP_32_64(muluh):
2165 done = fold_mul_highpart(&ctx, op);
2166 break;
2167 CASE_OP_32_64(muls2):
2168 CASE_OP_32_64(mulu2):
2169 done = fold_multiply2(&ctx, op);
2170 break;
2171 CASE_OP_32_64_VEC(nand):
2172 done = fold_nand(&ctx, op);
2173 break;
2174 CASE_OP_32_64(neg):
2175 done = fold_neg(&ctx, op);
2176 break;
2177 CASE_OP_32_64_VEC(nor):
2178 done = fold_nor(&ctx, op);
2179 break;
2180 CASE_OP_32_64_VEC(not):
2181 done = fold_not(&ctx, op);
2182 break;
2183 CASE_OP_32_64_VEC(or):
2184 done = fold_or(&ctx, op);
2185 break;
2186 CASE_OP_32_64_VEC(orc):
2187 done = fold_orc(&ctx, op);
2188 break;
2189 case INDEX_op_qemu_ld_i32:
2190 case INDEX_op_qemu_ld_i64:
2191 done = fold_qemu_ld(&ctx, op);
2192 break;
2193 case INDEX_op_qemu_st_i32:
2194 case INDEX_op_qemu_st8_i32:
2195 case INDEX_op_qemu_st_i64:
2196 done = fold_qemu_st(&ctx, op);
2197 break;
2198 CASE_OP_32_64(rem):
2199 CASE_OP_32_64(remu):
2200 done = fold_remainder(&ctx, op);
2201 break;
2202 CASE_OP_32_64(rotl):
2203 CASE_OP_32_64(rotr):
2204 CASE_OP_32_64(sar):
2205 CASE_OP_32_64(shl):
2206 CASE_OP_32_64(shr):
2207 done = fold_shift(&ctx, op);
2208 break;
2209 CASE_OP_32_64(setcond):
2210 done = fold_setcond(&ctx, op);
2211 break;
2212 case INDEX_op_setcond2_i32:
2213 done = fold_setcond2(&ctx, op);
2214 break;
2215 CASE_OP_32_64(sextract):
2216 done = fold_sextract(&ctx, op);
2217 break;
2218 CASE_OP_32_64(sub):
2219 done = fold_sub(&ctx, op);
2220 break;
2221 case INDEX_op_sub_vec:
2222 done = fold_sub_vec(&ctx, op);
2223 break;
2224 CASE_OP_32_64(sub2):
2225 done = fold_sub2(&ctx, op);
2226 break;
2227 CASE_OP_32_64_VEC(xor):
2228 done = fold_xor(&ctx, op);
2229 break;
2230 default:
2231 break;
2234 if (!done) {
2235 finish_folding(&ctx, op);