/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"
#include "selftest.h"
#include "selftest-rtl.h"
#include "rtx-vector-builder.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
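/* For example, HWI_SIGN_EXTEND (-1) is HOST_WIDE_INT_M1 and
   HWI_SIGN_EXTEND (1) is HOST_WIDE_INT_0: the high half of the pair is
   just the sign of the low half, replicated.  */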
static bool plus_minus_operand_p (const_rtx);

/* Negate I, which satisfies poly_int_rtx_p.  MODE is the mode of I.  */

static rtx
neg_poly_int_rtx (machine_mode mode, const_rtx i)
{
  return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
}
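/* For instance, negating (const_int 5) in SImode yields (const_int -5),
   and a poly_int constant such as [4, 4] becomes [-4, -4].  */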
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
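/* For example, for 32-bit SImode this holds only for (const_int -2147483648),
   i.e. the bit pattern 0x80000000, and for QImode only for (const_int -128).  */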
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
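/* For example, val_signbit_p (QImode, 0x80) and val_signbit_p (QImode, 0x180)
   are both true, since VAL is masked to QImode first, whereas any TImode
   query returns false on a 64-bit HOST_WIDE_INT host.  */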
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
				       rtx op0, rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
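/* For instance, simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds to
   X itself, while operands that do not fold (say, two distinct pseudo
   registers) just produce the (plus:SI ...) rtx with the operands in
   canonical order.  */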
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
	return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      poly_int64 offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep);
	    if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
		|| !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
		|| (toffset && !poly_int_tree_p (toffset, &toffset_val)))
	      decl = NULL;
	    else
	      offset += bytepos + toffset_val;
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && VAR_P (decl)
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
	      poly_int64 n_offset, o_offset;

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      n = strip_offset (n, &n_offset);
	      o = strip_offset (o, &o_offset);
	      if (!(known_eq (o_offset, n_offset + offset)
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && known_eq (offset, 0))
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
				      machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
					machine_mode op0_mode,
					rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
					 op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
					   machine_mode cmp_mode,
					   rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
					    op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
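/* For instance, a caller wanting a folded form of (eq:SI (reg:SI R)
   (const_int 0)) would use
   simplify_gen_relational (EQ, SImode, SImode, r, const0_rtx);
   if no simplification applies, the comparison rtx is generated as-is.  */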
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (UNLIKELY (fn != NULL))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
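/* For example, simplify_replace_rtx (x, reg, const0_rtx) substitutes
   (const_int 0) for every occurrence of REG in X and refolds the enclosing
   expressions, so (plus:SI (reg:SI R) (const_int 4)) comes back as
   (const_int 4).  */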
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (const_int Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
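/* As a concrete example, form (1) of an SImode truncation of (reg:DI X) is
   (subreg:SI (reg:DI X) 0) on a little-endian target, while form (2) is
   (truncate:SI (reg:DI X)); simplify_gen_unary (TRUNCATE, ...) chooses
   between the two as described above.  */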
rtx
simplify_context::simplify_truncation (machine_mode mode, rtx op,
				       machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
	  /* If doing this transform works for an X with all bits set,
	     it works for any X.  */
	  && ((GET_MODE_MASK (mode) >> shift) & mask)
	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
	{
	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
	  return simplify_gen_binary (AND, mode, op0, mask_op);
	}
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    {
	      pos -= op_precision - precision;
	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					   XEXP (op, 1), GEN_INT (pos));
	    }
	}
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					 XEXP (op, 1), XEXP (op, 2));
	}
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* Simplifications of (truncate:A (subreg:B X 0)).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && subreg_lowpart_p (op))
    {
      /* (truncate:A (subreg:B (truncate:C X) 0)) is (truncate:A X).  */
      if (GET_CODE (SUBREG_REG (op)) == TRUNCATE)
	{
	  rtx inner = XEXP (SUBREG_REG (op), 0);
	  if (GET_MODE_PRECISION (int_mode)
	      <= GET_MODE_PRECISION (subreg_mode))
	    return simplify_gen_unary (TRUNCATE, int_mode, inner,
				       GET_MODE (inner));
	  else
	    /* If subreg above is paradoxical and C is narrower
	       than A, return (subreg:A (truncate:C X) 0).  */
	    return simplify_gen_subreg (int_mode, SUBREG_REG (op),
					subreg_mode, 0);
	}

      /* Simplifications of (truncate:A (subreg:B X:C 0)) with
	 paradoxical subregs (B is wider than C).  */
      if (is_a <scalar_int_mode> (op_mode, &int_op_mode))
	{
	  unsigned int int_op_prec = GET_MODE_PRECISION (int_op_mode);
	  unsigned int subreg_prec = GET_MODE_PRECISION (subreg_mode);
	  if (int_op_prec > subreg_prec)
	    {
	      if (int_mode == subreg_mode)
		return SUBREG_REG (op);
	      if (GET_MODE_PRECISION (int_mode) < subreg_prec)
		return simplify_gen_unary (TRUNCATE, int_mode,
					   SUBREG_REG (op), subreg_mode);
	    }
	  /* Simplification of (truncate:A (subreg:B X:C 0)) where
	     A is narrower than B and B is narrower than C.  */
	  else if (int_op_prec < subreg_prec
		   && GET_MODE_PRECISION (int_mode) < int_op_prec)
	    return simplify_gen_unary (TRUNCATE, int_mode,
				       SUBREG_REG (op), subreg_mode);
	}
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
					    rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants can reach here with -frounding-math, if they do then
     the conversion isn't exact.  */
  if (op0_mode == VOIDmode)
    return false;
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
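/* For example, (float:SF (reg:HI X)) is always exact, since any 16-bit
   integer fits in SFmode's 24-bit significand, whereas (float:DF (reg:DI X))
   is exact only if nonzero_bits/num_sign_bit_copies show that X needs at
   most 53 significant bits.  */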
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
rtx
simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
					      rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
	 and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return simplify_gen_relational (GE, int_mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    std::swap (in1, in2);

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
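      /* The De Morgan transform above yields, for example,
	 (not (and X Y)) -> (ior (not X) (not Y)) and
	 (not (ior X Y)) -> (and (not X) (not Y)).  */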
      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
	 If comparison is not reversible use
	 x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
	{
	  rtx cond = XEXP (op, 0);
	  rtx true_rtx = XEXP (op, 1);
	  rtx false_rtx = XEXP (op, 2);

	  if ((GET_CODE (true_rtx) == NEG
	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	       || (GET_CODE (false_rtx) == NEG
		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
	    {
	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
		temp = reversed_comparison (cond, mode);
	      else
		{
		  temp = cond;
		  std::swap (true_rtx, false_rtx);
		}
	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
					   mode, temp, true_rtx, false_rtx);
	    }
	}

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
	{
	  int_mode = as_a <scalar_int_mode> (mode);
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  gen_int_shift_amount (inner,
								isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  gen_int_shift_amount (inner,
								isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	}

      if (vec_series_p (op, &base, &step))
	{
	  /* Only create a new series if we can simplify both parts.  In other
	     cases this isn't really a simplification, and it's not necessarily
	     a win to replace a vector operation with a scalar operation.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
	  if (base)
	    {
	      step = simplify_unary_operation (NEG, inner_mode,
					       step, inner_mode);
	      if (step)
		return gen_vec_series (mode, base, step);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (known_eq (GET_MODE_NUNITS (mode), 1)
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Check for useless truncation.  */
      if (GET_MODE (op) == mode)
	return op;
      break;

    case FLOAT_TRUNCATE:
      /* Check for useless truncation.  */
      if (GET_MODE (op) == mode)
	return op;

      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_UNIT_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	  && (flag_unsafe_math_optimizations
	      || exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
	return op;

      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x).

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	      && exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (val_signbit_known_clear_p (GET_MODE (op),
				     nonzero_bits (op, GET_MODE (op))))
	return op;

      /* Using nonzero_bits doesn't (currently) work for modes wider than
	 HOST_WIDE_INT, so the following transformations help simplify
	 ABS for TImode and wider.  */
      switch (GET_CODE (op))
	{
	case ABS:
	case CLRSB:
	case FFS:
	case PARITY:
	case POPCOUNT:
	case SS_ABS:
	  return op;

	case LSHIFTRT:
	  if (CONST_INT_P (XEXP (op, 1))
	      && INTVAL (XEXP (op, 1)) > 0
	      && is_a <scalar_int_mode> (mode, &int_mode)
	      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (int_mode))
	    return op;
	  break;

	default:
	  break;
	}

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && (num_sign_bit_copies (op, int_mode)
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_NEG (int_mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (*_extend (ffs <X>)).  */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	{
	  temp = simplify_gen_unary (FFS, GET_MODE (XEXP (op, 0)),
				     XEXP (op, 0), GET_MODE (XEXP (op, 0)));
	  return simplify_gen_unary (GET_CODE (op), mode, temp,
				     GET_MODE (temp));
	}
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	  /* (popcount (bswap <X>)) = (popcount <X>).  */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (zero_extend (popcount <X>)).  */
	  temp = simplify_gen_unary (POPCOUNT, GET_MODE (XEXP (op, 0)),
				     XEXP (op, 0), GET_MODE (XEXP (op, 0)));
	  return simplify_gen_unary (ZERO_EXTEND, mode, temp,
				     GET_MODE (temp));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  temp = simplify_gen_unary (PARITY, GET_MODE (XEXP (op, 0)),
				     XEXP (op, 0), GET_MODE (XEXP (op, 0)));
	  return simplify_gen_unary (GET_CODE (op), mode, temp,
				     GET_MODE (temp));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	case PARITY:
	  /* (parity (parity x)) -> parity (x).  */
	  return op;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
	return op;

      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}
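      /* The widening-multiplication canonicalization above turns, e.g.,
	 (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
				  (sign_extend:SI (reg:HI B))))
	 into (mult:DI (sign_extend:DI (reg:HI A))
		       (sign_extend:DI (reg:HI B))),
	 which is safe because the SImode product of two HImode values
	 cannot overflow.  */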
      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op))
	{
	  rtx subreg = SUBREG_REG (op);
	  machine_mode subreg_mode = GET_MODE (subreg);
	  if (!paradoxical_subreg_p (mode, subreg_mode))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
	      if (temp)
		{
		  /* Preserve SUBREG_PROMOTED_VAR_P.  */
		  if (partial_subreg_p (temp))
		    {
		      SUBREG_PROMOTED_VAR_P (temp) = 1;
		      SUBREG_PROMOTED_SET (temp, SRP_SIGNED);
		    }
		  return temp;
		}
	    }
	  else
	    /* Sign-extending a sign-extended subreg.  */
	    return simplify_gen_unary (SIGN_EXTEND, mode,
				       subreg, subreg_mode);
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  gcc_assert (GET_MODE_PRECISION (int_mode)
		      > GET_MODE_PRECISION (op_mode));
	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   int_mode, inner, tmode);
	    }
	}

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
	 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (op, 1) != const0_rtx)
	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

      /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
	 I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
	 (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
	 (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
	 O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
	 wider than O.  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
	{
	  scalar_int_mode m_mode, n_mode, o_mode;
	  rtx old_shift = XEXP (op, 0);
	  if (is_a <scalar_int_mode> (mode, &m_mode)
	      && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
	      && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
	      && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
		 == INTVAL (XEXP (old_shift, 1)))
	    {
	      rtx new_shift = simplify_gen_binary (ASHIFTRT,
						   GET_MODE (old_shift),
						   XEXP (old_shift, 0),
						   XEXP (old_shift, 1));
	      if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
		return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
					   GET_MODE (new_shift));
	      if (mode != GET_MODE (new_shift))
		return simplify_gen_unary (TRUNCATE, mode, new_shift,
					   GET_MODE (new_shift));
	      return new_shift;
	    }
	}

      /* We can canonicalize SIGN_EXTEND (op) as ZERO_EXTEND (op) when
	 we know the sign bit of OP must be clear.  */
      if (val_signbit_known_clear_p (GET_MODE (op),
				     nonzero_bits (op, GET_MODE (op))))
	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

      /* (sign_extend:DI (subreg:SI (ctz:DI ...))) is (ctz:DI ...).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_MODE (SUBREG_REG (op)) == mode
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (op), &op_mode)
	  && GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (op_mode) < GET_MODE_PRECISION (int_mode)
	  && (nonzero_bits (SUBREG_REG (op), mode)
	      & ~(GET_MODE_MASK (op_mode) >> 1)) == 0)
	return SUBREG_REG (op);

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    case ZERO_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
	return op;

      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op))
	{
	  rtx subreg = SUBREG_REG (op);
	  machine_mode subreg_mode = GET_MODE (subreg);
	  if (!paradoxical_subreg_p (mode, subreg_mode))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
	      if (temp)
		{
		  /* Preserve SUBREG_PROMOTED_VAR_P.  */
		  if (partial_subreg_p (temp))
		    {
		      SUBREG_PROMOTED_VAR_P (temp) = 1;
		      SUBREG_PROMOTED_SET (temp, SRP_UNSIGNED);
		    }
		  return temp;
		}
	    }
	  else
	    /* Zero-extending a zero-extended subreg.  */
	    return simplify_gen_unary (ZERO_EXTEND, mode,
				       subreg, subreg_mode);
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, int_mode,
					   inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
				     op0_mode);
	}

      /* (zero_extend:DI (subreg:SI (ctz:DI ...))) is (ctz:DI ...).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_MODE (SUBREG_REG (op)) == mode
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (op), &op_mode)
	  && GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (op_mode) < GET_MODE_PRECISION (int_mode)
	  && (nonzero_bits (SUBREG_REG (op), mode)
	      & ~GET_MODE_MASK (op_mode)) == 0)
	return SUBREG_REG (op);

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    default:
      break;
    }

  if (VECTOR_MODE_P (mode)
      && vec_duplicate_p (op, &elt)
      && code != VEC_DUPLICATE)
    {
      if (code == SIGN_EXTEND || code == ZERO_EXTEND)
	/* Enforce a canonical order of VEC_DUPLICATE wrt other unary
	   operations by promoting VEC_DUPLICATE to the root of the expression
	   (as far as possible).  */
	temp = simplify_gen_unary (code, GET_MODE_INNER (mode),
				   elt, GET_MODE_INNER (GET_MODE (op)));
      else
	/* Try applying the operator to ELT and see if that simplifies.
	   We can duplicate the result if so.

	   The reason we traditionally haven't used simplify_gen_unary
	   for these codes is that it didn't necessarily seem to be a
	   win to convert things like:

	     (neg:V (vec_duplicate:V (reg:S R)))

	   to:

	     (vec_duplicate:V (neg:S (reg:S R)))

	   The first might be done entirely in vector registers while the
	   second might need a move between register files.

	   However, there are also cases where promoting the vec_duplicate is
	   more efficient, and there is definite value in having a canonical
	   form when matching instruction patterns.  We should consider
	   extending the simplify_gen_unary code above to more cases.  */
	temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
					 elt, GET_MODE_INNER (GET_MODE (op)));
      if (temp)
	return gen_vec_duplicate (mode, temp);
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
				rtx op, machine_mode op_mode)
{
  scalar_int_mode result_mode;

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
	return gen_const_vec_duplicate (mode, op);
      if (GET_CODE (op) == CONST_VECTOR
	  && (CONST_VECTOR_DUPLICATE_P (op)
	      || CONST_VECTOR_NUNITS (op).is_constant ()))
	{
	  unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
				    ? CONST_VECTOR_NPATTERNS (op)
				    : CONST_VECTOR_NUNITS (op).to_constant ());
	  gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
	  rtx_vector_builder builder (mode, npatterns, 1);
	  for (unsigned i = 0; i < npatterns; i++)
	    builder.quick_push (CONST_VECTOR_ELT (op, i));
	  return builder.build ();
	}
    }
  if (VECTOR_MODE_P (mode)
      && GET_CODE (op) == CONST_VECTOR
      && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
    {
      gcc_assert (GET_MODE (op) == op_mode);

      rtx_vector_builder builder;
      if (!builder.new_unary_operation (mode, op, false))
	return 0;

      unsigned int count = builder.encoded_nelts ();
      for (unsigned int i = 0; i < count; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (op_mode));
	  if (!x || !valid_for_const_vector_p (mode, x))
	    return 0;
	  builder.quick_push (x);
	}
      return builder.build ();
    }
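
  /* Illustrative example (not from the original sources): the element-wise
     loop above folds (neg:V2SI (const_vector [5, -3])) into
     (const_vector [-5, 3]), provided each folded element is valid for a
     constant vector of MODE.  */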
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);

      /* Avoid the folding if flag_rounding_math is on and the
	 conversion is not exact.  */
      if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  bool fail = false;
	  wide_int w = real_to_integer (&d, &fail,
					GET_MODE_PRECISION
					  (as_a <scalar_int_mode> (op_mode)));
	  if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
	    return 0;
	}

      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);

      /* Avoid the folding if flag_rounding_math is on and the
	 conversion is not exact.  */
      if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  bool fail = false;
	  wide_int w = real_to_integer (&d, &fail,
					GET_MODE_PRECISION
					  (as_a <scalar_int_mode> (op_mode)));
	  if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
	    return 0;
	}

      return const_double_from_real_value (d, mode);
    }
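
  /* Illustrative example (not from the original sources): for an SImode
     input, (float:SF (const_int -1)) folds to -1.0, whereas
     (unsigned_float:SF (const_int -1)) treats the bits as 0xffffffff and
     folds to 4294967295.0.  */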
  if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
	return 0;

      wide_int result;
      scalar_int_mode imode = (op_mode == VOIDmode
			       ? result_mode
			       : as_a <scalar_int_mode> (op_mode));
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if this check were added to the test
	 above the code would die later anyway.  If this assert fires,
	 you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), result_mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), result_mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), result_mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), result_mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	case SS_NEG:
	  if (wi::only_sign_bit_p (op0))
	    result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
	  else
	    result = wi::neg (op0);
	  break;

	case SS_ABS:
	  if (wi::only_sign_bit_p (op0))
	    result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
	  else
	    result = wi::abs (op0);
	  break;

	default:
	  return 0;
	}

      return immed_wide_int_const (result, result_mode);
    }
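
  /* Worked examples (illustrative only, not from the original sources):
     in SImode, (popcount:SI (const_int 0xff)) folds to (const_int 8),
     (bswap:SI (const_int 0x12345678)) folds to (const_int 0x78563412),
     and (clz:SI (const_int 0)) is only folded when the target defines
     CLZ_DEFINED_VALUE_AT_ZERO for SImode.  */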
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);

      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* Or if flag_rounding_math is on and the truncation is not
	     exact.  */
	  if (HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	      && !exact_real_truncate (mode, &d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
	return 0;

      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }
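
  /* Worked example (illustrative only, not from the original sources):
     (fix:QI (const_double:SF 3.0e3)) clamps to the signed maximum of
     QImode and folds to (const_int 127), while
     (unsigned_fix:QI (const_double:SF -5.0)) folds to (const_int 0).  */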
  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
	{
	case NEG:
	  result = -const_poly_int_value (op);
	  break;

	case NOT:
	  result = ~const_poly_int_value (op);
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

rtx
simplify_context::simplify_byte_swapping_operation (rtx_code code,
						    machine_mode mode,
						    rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
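
/* Illustrative example (not from the original sources): in SImode,
   (and (bswap:SI (reg:SI X)) (const_int 0xff)) can be rewritten by the
   routine above as
   (bswap:SI (and:SI (reg:SI X) (const_int 0xff000000))), because the
   constant operand can be byte-swapped at compile time.  */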
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

rtx
simplify_context::simplify_associative_operation (rtx_code code,
						  machine_mode mode,
						  rtx op0, rtx op1)
{
  rtx tem;

  /* Normally expressions simplified by simplify-rtx.cc are combined
     at most from a few machine instructions and therefore the
     expressions should be fairly small.  During var-tracking
     we can see arbitrarily large expressions though and reassociating
     those can be quadratic, so punt after encountering max_assoc_count
     simplify_associative_operation calls during outermost simplify_*
     call.  */
  if (++assoc_count >= max_assoc_count)
    return NULL_RTX;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
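
/* Illustrative example (not from the original sources): the rules above
   canonicalize (plus (plus (reg:SI X) (const_int 4)) (reg:SI Y)) as
   (plus (plus (reg:SI X) (reg:SI Y)) (const_int 4)), moving the constant
   outwards where it can combine with other constants.  */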
/* Return a mask describing the COMPARISON.  */
static int
comparison_to_mask (enum rtx_code comparison)
{
  switch (comparison)
    {
    case LT:
      return 8;
    case GT:
      return 4;
    case EQ:
      return 2;
    case UNORDERED:
      return 1;

    case LTGT:
      return 12;
    case LE:
      return 10;
    case GE:
      return 6;
    case UNLT:
      return 9;
    case UNGT:
      return 5;
    case UNEQ:
      return 3;

    case ORDERED:
      return 14;
    case NE:
      return 13;
    case UNLE:
      return 11;
    case UNGE:
      return 7;

    default:
      gcc_unreachable ();
    }
}

/* Return a comparison corresponding to the MASK.  */
static enum rtx_code
mask_to_comparison (int mask)
{
  switch (mask)
    {
    case 8:
      return LT;
    case 4:
      return GT;
    case 2:
      return EQ;
    case 1:
      return UNORDERED;

    case 12:
      return LTGT;
    case 10:
      return LE;
    case 6:
      return GE;
    case 9:
      return UNLT;
    case 5:
      return UNGT;
    case 3:
      return UNEQ;

    case 14:
      return ORDERED;
    case 13:
      return NE;
    case 11:
      return UNLE;
    case 7:
      return UNGE;

    default:
      gcc_unreachable ();
    }
}
/* Return true if CODE is valid for comparisons of mode MODE, false
   otherwise.

   It is always safe to return false, even if the code was valid for the
   given mode as that will merely suppress optimizations.  */

static bool
comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
{
  switch (code)
    {
    /* These are valid for integral, floating and vector modes.  */
    case NE:
    case EQ:
    case GE:
    case GT:
    case LE:
    case LT:
      return (INTEGRAL_MODE_P (mode)
	      || FLOAT_MODE_P (mode)
	      || VECTOR_MODE_P (mode));

    /* These are valid for floating point modes.  */
    case LTGT:
    case UNORDERED:
    case ORDERED:
    case UNEQ:
    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      return FLOAT_MODE_P (mode);

    /* These are filtered out in simplify_logical_operation, but
       we check for them too as a matter of safety.  They are valid
       for integral and vector modes.  */
    case GEU:
    case GTU:
    case LEU:
    case LTU:
      return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);

    default:
      return false;
    }
}

/* Canonicalize RES, a scalar const0_rtx/const_true_rtx to the right
   false/true value of comparison with MODE where comparison operands
   have CMP_MODE.  */

static rtx
relational_result (machine_mode mode, machine_mode cmp_mode, rtx res)
{
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      if (res == const0_rtx)
        return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
      REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode);
      return const_double_from_real_value (val, mode);
#else
      return NULL_RTX;
#endif
    }
  if (VECTOR_MODE_P (mode))
    {
      if (res == const0_rtx)
	return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
      rtx val = VECTOR_STORE_FLAG_VALUE (mode);
      if (val == NULL_RTX)
	return NULL_RTX;
      if (val == const1_rtx)
	return CONST1_RTX (mode);

      return gen_const_vec_duplicate (mode, val);
#else
      return NULL_RTX;
#endif
    }
  /* For vector comparison with scalar int result, it is unknown
     if the target means here a comparison into an integral bitmask,
     or comparison where all comparisons true mean const_true_rtx
     whole result, or where any comparisons true mean const_true_rtx
     whole result.  For const0_rtx all the cases are the same.  */
  if (VECTOR_MODE_P (cmp_mode)
      && SCALAR_INT_MODE_P (mode)
      && res == const_true_rtx)
    return NULL_RTX;

  return res;
}
/* Simplify a logical operation CODE with result mode MODE, operating on OP0
   and OP1, which should be both relational operations.  Return 0 if no such
   simplification is possible.  */
rtx
simplify_context::simplify_logical_relational_operation (rtx_code code,
							  machine_mode mode,
							  rtx op0, rtx op1)
{
  /* We only handle IOR of two relational operations.  */
  if (code != IOR)
    return 0;

  if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
    return 0;

  if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	&& rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
    return 0;

  enum rtx_code code0 = GET_CODE (op0);
  enum rtx_code code1 = GET_CODE (op1);

  /* We don't handle unsigned comparisons currently.  */
  if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
    return 0;
  if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
    return 0;

  int mask0 = comparison_to_mask (code0);
  int mask1 = comparison_to_mask (code1);

  int mask = mask0 | mask1;

  if (mask == 15)
    return relational_result (mode, GET_MODE (op0), const_true_rtx);

  code = mask_to_comparison (mask);

  /* Many comparison codes are only valid for certain mode classes.  */
  if (!comparison_code_valid_for_mode (code, mode))
    return 0;

  op0 = XEXP (op1, 0);
  op1 = XEXP (op1, 1);

  return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
}
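
/* Worked example (illustrative, not from the original sources): for
   (ior (lt:SI (reg:SI X) (reg:SI Y)) (eq:SI (reg:SI X) (reg:SI Y))),
   the mask encoding above gives LT = 8 and EQ = 2, so the union is 10,
   which maps back to LE; the result is (le:SI (reg:SI X) (reg:SI Y)).
   A union of 15 would instead mean the combined comparison is always
   true.  */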
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
					     rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

rtx
simplify_context::simplify_binary_operation_series (rtx_code code,
						    machine_mode mode,
						    rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
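
/* Illustrative example (not from the original sources): adding
   (vec_series:V4SI (const_int 0) (const_int 1)) to
   (vec_duplicate:V4SI (const_int 10)), which is a series with step 0,
   folds the bases and the steps separately and yields
   (vec_series:V4SI (const_int 10) (const_int 1)).  */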
/* Subroutine of simplify_binary_operation_1.  Un-distribute a binary
   operation CODE with result mode MODE, operating on OP0 and OP1.
   E.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
   Returns NULL_RTX if no simplification is possible.  */

rtx
simplify_context::simplify_distributive_operation (rtx_code code,
						   machine_mode mode,
						   rtx op0, rtx op1)
{
  enum rtx_code op = GET_CODE (op0);
  gcc_assert (GET_CODE (op1) == op);

  if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
      && ! side_effects_p (XEXP (op0, 1)))
    return simplify_gen_binary (op, mode,
				simplify_gen_binary (code, mode,
						     XEXP (op0, 0),
						     XEXP (op1, 0)),
				XEXP (op0, 1));

  if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
    {
      if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && ! side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (op, mode,
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 1),
							 XEXP (op1, 1)),
				    XEXP (op0, 0));
      if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
	  && ! side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (op, mode,
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 1),
							 XEXP (op1, 0)),
				    XEXP (op0, 0));
      if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return simplify_gen_binary (op, mode,
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 0),
							 XEXP (op1, 1)),
				    XEXP (op0, 1));
    }

  return NULL_RTX;
}
/* Return TRUE if a rotate in mode MODE with a constant count in OP1
   should be reversed.

   If the rotate should not be reversed, return FALSE.

   LEFT indicates if this is a rotate left or a rotate right.  */

static bool
reverse_rotate_by_imm_p (machine_mode mode, unsigned int left, rtx op1)
{
  if (!CONST_INT_P (op1))
    return false;

  /* Some targets may only be able to rotate by a constant
     in one direction.  So we need to query the optab interface
     to see what is possible.  */
  optab binoptab = left ? rotl_optab : rotr_optab;
  optab re_binoptab = left ? rotr_optab : rotl_optab;
  enum insn_code icode = optab_handler (binoptab, mode);
  enum insn_code re_icode = optab_handler (re_binoptab, mode);

  /* If the target cannot support the reversed optab, then there
     is nothing to do.  */
  if (re_icode == CODE_FOR_nothing)
    return false;

  /* If the target does not support the requested rotate-by-immediate,
     then we want to try reversing the rotate.  We also want to try
     reversing to minimize the count.  */
  if ((icode == CODE_FOR_nothing)
      || (!insn_operand_matches (icode, 2, op1))
      || (IN_RANGE (INTVAL (op1),
		    GET_MODE_UNIT_PRECISION (mode) / 2 + left,
		    GET_MODE_UNIT_PRECISION (mode) - 1)))
    return (insn_operand_matches (re_icode, 2, op1));
  return false;
}
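
/* Illustrative example (not from the original sources): on a target that
   only provides a rotate-right-by-immediate pattern in SImode, a request
   to rotate left by 3 would be reported as reversible here, letting the
   caller emit (rotatert:SI X (const_int 29)) instead.  */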
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

rtx
simplify_context::simplify_binary_operation_1 (rtx_code code,
					       machine_mode mode,
					       rtx op0, rtx op1,
					       rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;
  poly_int64 offset;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, offset);
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && poly_int_rtx_p (op0, &offset))
	return plus_constant (mode, op1, offset);

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */
      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);

	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
	      return (set_src_cost (tem, int_mode, speed)
		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
	    }
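
	  /* Worked example (illustrative, not from the original sources):
	     for (plus:SI (mult:SI (reg:SI X) (const_int 3)) (reg:SI X))
	     the coefficients are 3 and 1, so the sum folds to
	     (mult:SI (reg:SI X) (const_int 4)) when that is no more costly
	     than the original; (ashift:SI (reg:SI X) (const_int 2)) is
	     treated as a multiplication by 4 for this purpose.  */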
	  /* Optimize (X - 1) * Y + Y to X * Y.  */
	  lhs = op0;
	  rhs = op1;
	  if (GET_CODE (op0) == MULT)
	    {
	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
		    && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
		   || (GET_CODE (XEXP (op0, 0)) == MINUS
		       && XEXP (XEXP (op0, 0), 1) == const1_rtx))
		  && rtx_equal_p (XEXP (op0, 1), op1))
		lhs = XEXP (XEXP (op0, 0), 0);
	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
			 && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
			|| (GET_CODE (XEXP (op0, 1)) == MINUS
			    && XEXP (XEXP (op0, 1), 1) == const1_rtx))
		       && rtx_equal_p (XEXP (op0, 0), op1))
		lhs = XEXP (XEXP (op0, 1), 0);
	    }
	  else if (GET_CODE (op1) == MULT)
	    {
	      if (((GET_CODE (XEXP (op1, 0)) == PLUS
		    && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
		   || (GET_CODE (XEXP (op1, 0)) == MINUS
		       && XEXP (XEXP (op1, 0), 1) == const1_rtx))
		  && rtx_equal_p (XEXP (op1, 1), op0))
		rhs = XEXP (XEXP (op1, 0), 0);
	      else if (((GET_CODE (XEXP (op1, 1)) == PLUS
			 && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
			|| (GET_CODE (XEXP (op1, 1)) == MINUS
			    && XEXP (XEXP (op1, 1), 1) == const1_rtx))
		       && rtx_equal_p (XEXP (op1, 0), op0))
		rhs = XEXP (XEXP (op1, 1), 0);
	    }
	  if (lhs != op0 || rhs != op1)
	    return simplify_gen_binary (MULT, int_mode, lhs, rhs);
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  tem = simplify_binary_operation_series (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

	  if (REG_P (xop00) && REG_P (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE (xop00) == mode
	      && GET_MODE (xop10) == mode
	      && GET_MODE_CLASS (mode) == MODE_CC)
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
	 constants, in which case not retaining additions and
	 subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
	  && !contains_symbolic_reference_p (op1))
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signalling NaNs,
	 or has signed zeros and supports rounding towards -infinity.
	 In such a case, 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && !HONOR_SNANS (mode)
	  && trueop1 == CONST0_RTX (mode))
	return op0;
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (int_mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);

	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
	      return (set_src_cost (tem, int_mode, speed)
		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
	    }

	  /* Optimize (X + 1) * Y - Y to X * Y.  */
	  lhs = op0;
	  if (GET_CODE (op0) == MULT)
	    {
	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
		    && XEXP (XEXP (op0, 0), 1) == const1_rtx)
		   || (GET_CODE (XEXP (op0, 0)) == MINUS
		       && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
		  && rtx_equal_p (XEXP (op0, 1), op1))
		lhs = XEXP (XEXP (op0, 0), 0);
	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
			 && XEXP (XEXP (op0, 1), 1) == const1_rtx)
			|| (GET_CODE (XEXP (op0, 1)) == MINUS
			    && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
		       && rtx_equal_p (XEXP (op0, 0), op1))
		lhs = XEXP (XEXP (op0, 1), 0);
	    }
	  if (lhs != op0)
	    return simplify_gen_binary (MULT, int_mode, lhs, op1);
	}
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));

      /* Don't let a relocatable value get a negative coeff.  */
      if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_poly_int_rtx (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;
      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  tem = simplify_binary_operation_series (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0,
					gen_int_shift_amount (mode, val));
	}
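
      /* Illustrative example (not from the original sources): in SImode,
	 (mult:SI (reg:SI X) (const_int 8)) becomes
	 (ashift:SI (reg:SI X) (const_int 3)), since 8 is 1 << 3.  */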
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	  if (real_equal (d1, &dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && real_equal (d1, &dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONSTM1_RTX (mode);

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
	}
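
      /* Worked example (illustrative, not from the original sources): with
	 C1 = 0x0f and C2 = 0xff, (C1 & C2) == C1, so
	 (ior (and X 0x0f) 0xff) folds to 0xff; with C1 = 0xf0 and C2 = 0x0f
	 in QImode, C1 | C2 covers the whole mode mask, so the AND is dropped
	 and the result is (ior X 0x0f).  */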
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_UNIT_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
				     &inner_mode)
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
	  && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
	      + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));
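
      /* Illustrative example (not from the original sources): in SImode,
	 (ior (ashift:SI (reg:SI X) (const_int 3))
	      (lshiftrt:SI (reg:SI X) (const_int 29)))
	 matches the first pattern because 3 + 29 == 32, and is rewritten
	 as (rotate:SI (reg:SI X) (const_int 3)).  */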
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      /* The following happens with bitfield merging.
	 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
      if (GET_CODE (op0) == AND
	  && GET_CODE (op1) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (XEXP (op1, 1))
	  && (INTVAL (XEXP (op0, 1))
	      == ~INTVAL (XEXP (op1, 1))))
	{
	  /* The IOR may be on both sides.  */
	  rtx top0 = NULL_RTX, top1 = NULL_RTX;
	  if (GET_CODE (XEXP (op1, 0)) == IOR)
	    top0 = op0, top1 = op1;
	  else if (GET_CODE (XEXP (op0, 0)) == IOR)
	    top0 = op1, top1 = op0;
	  if (top0 && top1)
	    {
	      /* X may be on either side of the inner IOR.  */
	      rtx tem = NULL_RTX;
	      if (rtx_equal_p (XEXP (top0, 0),
			       XEXP (XEXP (top1, 0), 0)))
		tem = XEXP (XEXP (top1, 0), 1);
	      else if (rtx_equal_p (XEXP (top0, 0),
				    XEXP (XEXP (top1, 0), 1)))
		tem = XEXP (XEXP (top1, 0), 0);
	      if (tem)
		return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
					    simplify_gen_binary
					      (AND, mode, tem, XEXP (top1, 1)));
	    }
	}

      /* Convert (ior (and A C) (and B C)) into (and (ior A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == IOR
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_logical_relational_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
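
      /* Illustrative example (not from the original sources): in SImode,
	 (xor (ashift:SI (reg:SI X) (const_int 16))
	      (and:SI (reg:SI Y) (const_int 0xffff)))
	 has disjoint nonzero bits in its operands, so it is rewritten as
	 the corresponding IOR.  */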
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);
      /* Given (xor (ior (xor A B) C) D), where B, C and D are
	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
	 out bits inverted twice and not set by C.  Similarly, given
	 (xor (and (xor A B) C) D), simplify without inverting C in
	 the xor operand: (xor (and A C) (B&C)^D).  */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (op1)
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
	{
	  enum rtx_code op = GET_CODE (op0);
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx d = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);
	  HOST_WIDE_INT dval = INTVAL (d);
	  HOST_WIDE_INT xcval;

	  if (op == IOR)
	    xcval = ~cval;
	  else
	    xcval = cval;

	  return simplify_gen_binary (XOR, mode,
				      simplify_gen_binary (op, mode, a, c),
				      gen_int_mode ((bval & xcval) ^ dval,
						    mode));
	}

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  /* Instead of computing ~A&C, we compute its negated value,
	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
						  GEN_INT (~cval));
	  if ((~cval & bval) == 0)
	    {
	      rtx na_c = NULL_RTX;
	      if (n_na_c)
		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
	      else
		{
		  /* If ~A does not simplify, don't bother: we don't
		     want to simplify 2 operations into 3, and if na_c
		     were to simplify with na, n_na_c would have
		     simplified as well.  */
		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
		  if (na)
		    na_c = simplify_gen_binary (AND, mode, na, c);
		}

	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval, mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (n_na_c == CONSTM1_RTX (mode))
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}
      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
	 do (ior (and A ~C) (and B C)) which is a machine instruction on some
	 machines, and also has shorter instruction path length.  */
      if (GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && CONST_INT_P (XEXP (op0, 1))
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
	{
	  rtx a = trueop1;
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
	  rtx bc = simplify_gen_binary (AND, mode, b, c);
	  return simplify_gen_binary (IOR, mode, a_nc, bc);
	}
      /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)).  */
      else if (GET_CODE (op0) == AND
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (XEXP (op0, 1))
	       && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
	{
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = trueop1;
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
	  rtx ac = simplify_gen_binary (AND, mode, a, c);
	  return simplify_gen_binary (IOR, mode, ac, b_nc);
	}
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, int_mode)))
	return reversed;
      /* Convert (xor (and A C) (and B C)) into (and (xor A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3789 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3791 if (HWI_COMPUTABLE_MODE_P (mode
))
3793 /* When WORD_REGISTER_OPERATIONS is true, we need to know the
3794 nonzero bits in WORD_MODE rather than MODE. */
3795 scalar_int_mode tmode
= as_a
<scalar_int_mode
> (mode
);
3796 if (WORD_REGISTER_OPERATIONS
3797 && GET_MODE_BITSIZE (tmode
) < BITS_PER_WORD
)
3799 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, tmode
);
3800 HOST_WIDE_INT nzop1
;
3801 if (CONST_INT_P (trueop1
))
3803 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3804 /* If we are turning off bits already known off in OP0, we need
3806 if ((nzop0
& ~val1
) == 0)
3809 nzop1
= nonzero_bits (trueop1
, mode
);
3810 /* If we are clearing all the nonzero bits, the result is zero. */
3811 if ((nzop1
& nzop0
) == 0
3812 && !side_effects_p (op0
) && !side_effects_p (op1
))
3813 return CONST0_RTX (mode
);
3815 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3816 && GET_MODE_CLASS (mode
) != MODE_CC
)
3819 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3820 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3821 && ! side_effects_p (op0
)
3822 && GET_MODE_CLASS (mode
) != MODE_CC
)
3823 return CONST0_RTX (mode
);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}
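
      /* Illustrative example (not from the original sources):
	 (and:QI (truncate:QI (reg:SI X)) (const_int 0x0f)) becomes
	 (truncate:QI (and:SI (reg:SI X) (const_int 0x0f))), which may allow
	 the truncation to be removed by later simplifications.  */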
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}
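
      /* Worked example (illustrative, not from the original sources): with
	 C1 = 0xf0 and C2 = 0x3c, C1 & C2 is 0x30, so
	 (and (ior A 0xf0) 0x3c) is rewritten as (ior (and A 0x3c) 0x30).  */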
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
3904 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3905    ((A & N) + B) & M -> (A + B) & M
3906    Similarly if (N & M) == 0,
3907    ((A | N) + B) & M -> (A + B) & M
3908    and for - instead of + and/or ^ instead of |.
3909    Also, if (N & M) == 0, then
3910    (A +- N) & M -> A & M. */
3911 if (CONST_INT_P (trueop1)
3912     && HWI_COMPUTABLE_MODE_P (mode)
3913     && ~UINTVAL (trueop1)
3914     && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3915     && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3920 pmop[0] = XEXP (op0, 0);
3921 pmop[1] = XEXP (op0, 1);
3923 if (CONST_INT_P (pmop[1])
3924     && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3925 return simplify_gen_binary (AND, mode, pmop[0], op1);
3927 for (which = 0; which < 2; which++)
3930 switch (GET_CODE (tem))
3933 if (CONST_INT_P (XEXP (tem, 1))
3934     && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3935        == UINTVAL (trueop1))
3936 pmop[which] = XEXP (tem, 0);
3940 if (CONST_INT_P (XEXP (tem, 1))
3941     && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3942 pmop[which] = XEXP (tem, 0);
3949 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3951 tem = simplify_gen_binary (GET_CODE (op0), mode,
3953 return simplify_gen_binary (code, mode, tem, op1);
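/* Editor's sketch (not part of simplify-rtx.cc): the identities named in the
   comment above, demonstrated in 32-bit wrap-around unsigned arithmetic with
   M == 0xff (so M == (1 << 8) - 1).  The constants here are arbitrary.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  const uint32_t m = 0xffu;
  const uint32_t a = 0x12345678u, b = 0x9abcdef0u;

  /* (N & M) == M: (A & N) keeps exactly the bits of A selected by M.  */
  const uint32_t n1 = 0xffff00ffu;
  assert ((n1 & m) == m);
  assert ((((a & n1) + b) & m) == ((a + b) & m));

  /* (N & M) == 0: (A | N) leaves the bits selected by M untouched.  */
  const uint32_t n2 = 0x0000ff00u;
  assert ((n2 & m) == 0);
  assert ((((a | n2) + b) & m) == ((a + b) & m));

  /* (N & M) == 0: adding or subtracting N is invisible modulo M + 1.  */
  assert (((a + n2) & m) == (a & m));
  assert (((a - n2) & m) == (a & m));
  return 0;
}
#endif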
3957 /* (and X (ior (not X) Y) -> (and X Y) */
3958 if (GET_CODE (op1
) == IOR
3959 && GET_CODE (XEXP (op1
, 0)) == NOT
3960 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3961 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3963 /* (and (ior (not X) Y) X) -> (and X Y) */
3964 if (GET_CODE (op0
) == IOR
3965 && GET_CODE (XEXP (op0
, 0)) == NOT
3966 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3967 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3969 /* (and X (ior Y (not X)) -> (and X Y) */
3970 if (GET_CODE (op1
) == IOR
3971 && GET_CODE (XEXP (op1
, 1)) == NOT
3972 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3973 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3975 /* (and (ior Y (not X)) X) -> (and X Y) */
3976 if (GET_CODE (op0
) == IOR
3977 && GET_CODE (XEXP (op0
, 1)) == NOT
3978 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3979 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3981 /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C). */
3982 if (GET_CODE (op0
) == GET_CODE (op1
)
3983 && (GET_CODE (op0
) == AND
3984 || GET_CODE (op0
) == IOR
3985 || GET_CODE (op0
) == LSHIFTRT
3986 || GET_CODE (op0
) == ASHIFTRT
3987 || GET_CODE (op0
) == ASHIFT
3988 || GET_CODE (op0
) == ROTATE
3989 || GET_CODE (op0
) == ROTATERT
))
3991 tem
= simplify_distributive_operation (code
, mode
, op0
, op1
);
3996 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
4000 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4006 /* 0/x is 0 (or x&0 if x has side-effects). */
4007 if (trueop0 == CONST0_RTX (mode)
4008     && !cfun->can_throw_non_call_exceptions)
4010 if (side_effects_p (op1))
4011 return simplify_gen_binary (AND, mode, op1, trueop0);
4015 if (trueop1 == CONST1_RTX (mode))
4017 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
4021 /* Convert divide by power of two into shift. */
4022 if (CONST_INT_P (trueop1)
4023     && (val = exact_log2 (UINTVAL (trueop1))) > 0)
4024 return simplify_gen_binary (LSHIFTRT, mode, op0,
4025                             gen_int_shift_amount (mode, val));
4029 /* Handle floating point and integers separately. */
4030 if (SCALAR_FLOAT_MODE_P (mode))
4032 /* Maybe change 0.0 / x to 0.0. This transformation isn't
4033    safe for modes with NaNs, since 0.0 / 0.0 will then be
4034    NaN rather than 0.0. Nor is it safe for modes with signed
4035    zeros, since dividing 0 by a negative number gives -0.0 */
4036 if (trueop0 == CONST0_RTX (mode)
4037     && !HONOR_NANS (mode)
4038     && !HONOR_SIGNED_ZEROS (mode)
4039     && ! side_effects_p (op1))
4042 if (trueop1 == CONST1_RTX (mode)
4043     && !HONOR_SNANS (mode))
4046 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
4047     && trueop1 != CONST0_RTX (mode))
4049 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
4052 if (real_equal (d1, &dconstm1)
4053     && !HONOR_SNANS (mode))
4054 return simplify_gen_unary (NEG, mode, op0, mode);
4056 /* Change FP division by a constant into multiplication.
4057    Only do this with -freciprocal-math. */
4058 if (flag_reciprocal_math
4059     && !real_equal (d1, &dconst0))
4062 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
4063 tem = const_double_from_real_value (d, mode);
4064 return simplify_gen_binary (MULT, mode, op0, tem);
4068 else if (SCALAR_INT_MODE_P (mode
))
4070 /* 0/x is 0 (or x&0 if x has side-effects). */
4071 if (trueop0
== CONST0_RTX (mode
)
4072 && !cfun
->can_throw_non_call_exceptions
)
4074 if (side_effects_p (op1
))
4075 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
4079 if (trueop1
== CONST1_RTX (mode
))
4081 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
4086 if (trueop1
== constm1_rtx
)
4088 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
4090 return simplify_gen_unary (NEG
, mode
, x
, mode
);
4096 /* 0%x is 0 (or x&0 if x has side-effects). */
4097 if (trueop0 == CONST0_RTX (mode))
4099 if (side_effects_p (op1))
4100 return simplify_gen_binary (AND, mode, op1, trueop0);
4103 /* x%1 is 0 (or x&0 if x has side-effects). */
4104 if (trueop1 == CONST1_RTX (mode))
4106 if (side_effects_p (op0))
4107 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
4108 return CONST0_RTX (mode);
4110 /* Implement modulus by power of two as AND. */
4111 if (CONST_INT_P (trueop1)
4112     && exact_log2 (UINTVAL (trueop1)) > 0)
4113 return simplify_gen_binary (AND, mode, op0,
4114                             gen_int_mode (UINTVAL (trueop1) - 1,
4119 /* 0%x is 0 (or x&0 if x has side-effects). */
4120 if (trueop0 == CONST0_RTX (mode))
4122 if (side_effects_p (op1))
4123 return simplify_gen_binary (AND, mode, op1, trueop0);
4126 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
4127 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
4129 if (side_effects_p (op0))
4130 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
4131 return CONST0_RTX (mode);
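/* Editor's sketch (not part of simplify-rtx.cc): the unsigned-modulus rewrite
   a few lines above relies on x % 2**k == x & (2**k - 1) for unsigned x;
   here k == 4 and the values are arbitrary.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  const uint32_t x = 0xdeadbeefu;
  assert (x % 16u == (x & 15u));
  assert (5u % 16u == (5u & 15u));
  return 0;
}
#endif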
4137 if (trueop1 == CONST0_RTX (mode))
4139 /* Canonicalize rotates by constant amount. If the condition of
4140    reversing direction is met, then reverse the direction. */
4141 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
4142 if (reverse_rotate_by_imm_p (mode, (code == ROTATE), trueop1))
4144 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
4145 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
4146 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
4147                             mode, op0, new_amount_rtx);
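/* Editor's sketch (not part of simplify-rtx.cc): rotating left by K equals
   rotating right by PRECISION - K, which is the identity the reversal above
   uses.  The rotl32/rotr32 helpers are invented here and are only valid for
   0 < K < 32.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
rotl32 (uint32_t x, unsigned int k)
{
  return (x << k) | (x >> (32 - k));
}

static uint32_t
rotr32 (uint32_t x, unsigned int k)
{
  return (x >> k) | (x << (32 - k));
}

int
main (void)
{
  const uint32_t x = 0xdeadbeefu;
  assert (rotl32 (x, 5) == rotr32 (x, 32 - 5));
  assert (rotr32 (x, 20) == rotl32 (x, 32 - 20));
  return 0;
}
#endif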
4152 if (trueop1
== CONST0_RTX (mode
))
4154 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
4156 /* Rotating ~0 always results in ~0. */
4157 if (CONST_INT_P (trueop0
)
4158 && HWI_COMPUTABLE_MODE_P (mode
)
4159 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
4160 && ! side_effects_p (op1
))
4166 scalar constants c1, c2
4167 size (M2) > size (M1)
4168 c1 == size (M2) - size (M1)
4170 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
4174 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
4176 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
4177 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4179 && CONST_INT_P (op1
)
4180 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
4181 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
4183 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
4184 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
4185 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
4186 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
4187 && subreg_lowpart_p (op0
))
4189 rtx tmp
= gen_int_shift_amount
4190 (inner_mode
, INTVAL (XEXP (SUBREG_REG (op0
), 1)) + INTVAL (op1
));
4192 /* Combine would usually zero out the value when combining two
4193 local shifts and the range becomes larger or equal to the mode.
4194 However since we fold away one of the shifts here combine won't
4195 see it so we should immediately zero the result if it's out of
4197 if (code
== LSHIFTRT
4198 && INTVAL (tmp
) >= GET_MODE_BITSIZE (inner_mode
))
4201 tmp
= simplify_gen_binary (code
,
4203 XEXP (SUBREG_REG (op0
), 0),
4206 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
4209 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
4211 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
4212 if (val != INTVAL (op1))
4213 return simplify_gen_binary (code, mode, op0,
4214                             gen_int_shift_amount (mode, val));
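/* Editor's sketch (not part of simplify-rtx.cc): on a target that defines
   SHIFT_COUNT_TRUNCATED, the hardware only looks at the low bits of the
   count, so for a 32-bit mode a count of 33 canonicalizes to 33 & 31 == 1.
   Only the masking arithmetic is shown, since shifting by an out-of-range
   count is undefined in plain C.  */
#if 0
#include <assert.h>

int
main (void)
{
  assert ((33 & (32 - 1)) == 1);
  assert ((40 & (32 - 1)) == 8);
  return 0;
}
#endif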
4219 if (CONST_INT_P (trueop0
)
4220 && HWI_COMPUTABLE_MODE_P (mode
)
4221 && (UINTVAL (trueop0
) == (GET_MODE_MASK (mode
) >> 1)
4222 || mode_signbit_p (mode
, trueop0
))
4223 && ! side_effects_p (op1
))
4225 goto simplify_ashift
;
4228 if (CONST_INT_P (trueop0
)
4229 && HWI_COMPUTABLE_MODE_P (mode
)
4230 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
4231 && ! side_effects_p (op1
))
4237 if (trueop1 == CONST0_RTX (mode))
4239 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4243 && CONST_INT_P (trueop1)
4244 && is_a <scalar_int_mode> (mode, &int_mode)
4245 && IN_RANGE (UINTVAL (trueop1),
4246              1, GET_MODE_PRECISION (int_mode) - 1))
4248 auto c = (wi::one (GET_MODE_PRECISION (int_mode))
4249           << UINTVAL (trueop1));
4250 rtx new_op1 = immed_wide_int_const (c, int_mode);
4251 return simplify_gen_binary (MULT, int_mode, op0, new_op1);
4253 goto canonicalize_shift;
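/* Editor's sketch (not part of simplify-rtx.cc): a left shift by an
   in-range constant K multiplies by 2**K modulo the mode size, which is why
   the code above can replace the shift with a MULT by 1 << K.  32-bit
   unsigned wrap-around arithmetic is assumed.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  const uint32_t x = 0x1234u;
  assert ((x << 5) == x * (1u << 5));
  assert ((x << 20) == x * (1u << 20));
  return 0;
}
#endif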
4256 if (trueop1
== CONST0_RTX (mode
))
4258 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
4260 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
4261 if (GET_CODE (op0) == CLZ
4262     && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
4263     && CONST_INT_P (trueop1)
4264     && STORE_FLAG_VALUE == 1
4265     && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
4267 unsigned HOST_WIDE_INT zero_val = 0;
4269 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
4270     && zero_val == GET_MODE_PRECISION (inner_mode)
4271     && INTVAL (trueop1) == exact_log2 (zero_val))
4272 return simplify_gen_relational (EQ, mode, inner_mode,
4273                                 XEXP (op0, 0), const0_rtx);
4275 goto canonicalize_shift;
4278 if (HWI_COMPUTABLE_MODE_P (mode
)
4279 && mode_signbit_p (mode
, trueop1
)
4280 && ! side_effects_p (op0
))
4282 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4284 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4290 if (HWI_COMPUTABLE_MODE_P (mode
)
4291 && CONST_INT_P (trueop1
)
4292 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
4293 && ! side_effects_p (op0
))
4295 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4297 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4303 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
4305 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4307 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4313 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
4315 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4317 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4326 /* Simplify x +/- 0 to x, if possible. */
4327 if (trueop1
== CONST0_RTX (mode
))
4333 /* Simplify x * 0 to 0, if possible. */
4334 if (trueop1
== CONST0_RTX (mode
)
4335 && !side_effects_p (op0
))
4338 /* Simplify x * 1 to x, if possible. */
4339 if (trueop1
== CONST1_RTX (mode
))
4345 /* Simplify x * 0 to 0, if possible. */
4346 if (trueop1
== CONST0_RTX (mode
)
4347 && !side_effects_p (op0
))
4353 /* Simplify x / 1 to x, if possible. */
4354 if (trueop1
== CONST1_RTX (mode
))
4359 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
4360 return gen_vec_duplicate (mode
, op0
);
4361 if (valid_for_const_vector_p (mode
, op0
)
4362 && valid_for_const_vector_p (mode
, op1
))
4363 return gen_const_vec_series (mode
, op0
, op1
);
4367 if (!VECTOR_MODE_P (mode
))
4369 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
4370 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
4371 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
4372 gcc_assert (XVECLEN (trueop1
, 0) == 1);
4374 /* We can't reason about selections made at runtime. */
4375 if (!CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
4378 if (vec_duplicate_p (trueop0
, &elt0
))
4381 if (GET_CODE (trueop0
) == CONST_VECTOR
)
4382 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
4385 /* Extract a scalar element from a nested VEC_SELECT expression
4386 (with optional nested VEC_CONCAT expression). Some targets
4387 (i386) extract scalar element from a vector using chain of
4388 nested VEC_SELECT expressions. When the input operand is a memory
4389 operand, this operation can be simplified to a simple scalar
4390 load from an offset memory address. */
4392 if (GET_CODE (trueop0
) == VEC_SELECT
4393 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
4394 .is_constant (&n_elts
)))
4396 rtx op0
= XEXP (trueop0
, 0);
4397 rtx op1
= XEXP (trueop0
, 1);
4399 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
4405 gcc_assert (GET_CODE (op1
) == PARALLEL
);
4406 gcc_assert (i
< n_elts
);
4408 /* Select element, pointed by nested selector. */
4409 elem
= INTVAL (XVECEXP (op1
, 0, i
));
4411 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
4412 if (GET_CODE (op0
) == VEC_CONCAT
)
4414 rtx op00
= XEXP (op0
, 0);
4415 rtx op01
= XEXP (op0
, 1);
4417 machine_mode mode00
, mode01
;
4418 int n_elts00
, n_elts01
;
4420 mode00
= GET_MODE (op00
);
4421 mode01
= GET_MODE (op01
);
4423 /* Find out the number of elements of each operand.
4424 Since the concatenated result has a constant number
4425 of elements, the operands must too. */
4426 n_elts00
= GET_MODE_NUNITS (mode00
).to_constant ();
4427 n_elts01
= GET_MODE_NUNITS (mode01
).to_constant ();
4429 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
4431 /* Select correct operand of VEC_CONCAT
4432 and adjust selector. */
4433 if (elem
< n_elts01
)
4444 vec
= rtvec_alloc (1);
4445 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
4447 tmp
= gen_rtx_fmt_ee (code
, mode
,
4448 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
4454 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
4455 gcc_assert (GET_MODE_INNER (mode
)
4456 == GET_MODE_INNER (GET_MODE (trueop0
)));
4457 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
4459 if (vec_duplicate_p (trueop0
, &elt0
))
4460 /* It doesn't matter which elements are selected by trueop1,
4461 because they are all the same. */
4462 return gen_vec_duplicate (mode
, elt0
);
4464 if (GET_CODE (trueop0
) == CONST_VECTOR
)
4466 unsigned n_elts
= XVECLEN (trueop1
, 0);
4467 rtvec v
= rtvec_alloc (n_elts
);
4470 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
4471 for (i
= 0; i
< n_elts
; i
++)
4473 rtx x
= XVECEXP (trueop1
, 0, i
);
4475 if (!CONST_INT_P (x
))
4478 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
4482 return gen_rtx_CONST_VECTOR (mode
, v
);
4485 /* Recognize the identity. */
4486 if (GET_MODE (trueop0
) == mode
)
4488 bool maybe_ident
= true;
4489 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
4491 rtx j
= XVECEXP (trueop1
, 0, i
);
4492 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
4494 maybe_ident
= false;
4502 /* If we select a low-part subreg, return that. */
4503 if (vec_series_lowpart_p (mode
, GET_MODE (trueop0
), trueop1
))
4505 rtx new_rtx
= lowpart_subreg (mode
, trueop0
,
4506 GET_MODE (trueop0
));
4507 if (new_rtx
!= NULL_RTX
)
4511 /* If we build {a,b} then permute it, build the result directly. */
4512 if (XVECLEN (trueop1
, 0) == 2
4513 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
4514 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
4515 && GET_CODE (trueop0
) == VEC_CONCAT
4516 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
4517 && GET_MODE (XEXP (trueop0
, 0)) == mode
4518 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
4519 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
4521 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
4522 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
4525 gcc_assert (i0
< 4 && i1
< 4);
4526 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
4527 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
4529 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
4532 if (XVECLEN (trueop1
, 0) == 2
4533 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
4534 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
4535 && GET_CODE (trueop0
) == VEC_CONCAT
4536 && GET_MODE (trueop0
) == mode
)
4538 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
4539 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
4542 gcc_assert (i0
< 2 && i1
< 2);
4543 subop0
= XEXP (trueop0
, i0
);
4544 subop1
= XEXP (trueop0
, i1
);
4546 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
4549 /* If we select one half of a vec_concat, return that. */
4551 if (GET_CODE (trueop0
) == VEC_CONCAT
4552 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
4554 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 1)))
4556 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
4558 rtx subop0
= XEXP (trueop0
, 0);
4559 rtx subop1
= XEXP (trueop0
, 1);
4560 machine_mode mode0
= GET_MODE (subop0
);
4561 machine_mode mode1
= GET_MODE (subop1
);
4562 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
4563 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
4565 bool success
= true;
4566 for (int i
= 1; i
< l0
; ++i
)
4568 rtx j
= XVECEXP (trueop1
, 0, i
);
4569 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
4578 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
4580 bool success
= true;
4581 for (int i
= 1; i
< l1
; ++i
)
4583 rtx j
= XVECEXP (trueop1
, 0, i
);
4584 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
4595 /* Simplify vec_select of a subreg of X to just a vec_select of X
4596 when X has same component mode as vec_select. */
4597 unsigned HOST_WIDE_INT subreg_offset
= 0;
4598 if (GET_CODE (trueop0
) == SUBREG
4599 && GET_MODE_INNER (mode
)
4600 == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0
)))
4601 && GET_MODE_NUNITS (mode
).is_constant (&l1
)
4602 && constant_multiple_p (subreg_memory_offset (trueop0
),
4603 GET_MODE_UNIT_BITSIZE (mode
),
4607 = GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0
)));
4608 bool success
= true;
4609 for (int i
= 0; i
!= l1
; i
++)
4611 rtx idx
= XVECEXP (trueop1
, 0, i
);
4612 if (!CONST_INT_P (idx
)
4613 || maybe_ge (UINTVAL (idx
) + subreg_offset
, nunits
))
4625 rtvec vec
= rtvec_alloc (l1
);
4626 for (int i
= 0; i
< l1
; i
++)
4628 = GEN_INT (INTVAL (XVECEXP (trueop1
, 0, i
))
4630 par
= gen_rtx_PARALLEL (VOIDmode
, vec
);
4632 return gen_rtx_VEC_SELECT (mode
, SUBREG_REG (trueop0
), par
);
4637 if (XVECLEN (trueop1
, 0) == 1
4638 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
4639 && GET_CODE (trueop0
) == VEC_CONCAT
)
4642 offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
4644 /* Try to find the element in the VEC_CONCAT. */
4645 while (GET_MODE (vec
) != mode
4646 && GET_CODE (vec
) == VEC_CONCAT
)
4648 poly_int64 vec_size
;
4650 if (CONST_INT_P (XEXP (vec
, 0)))
4652 /* vec_concat of two const_ints doesn't make sense with
4653 respect to modes. */
4654 if (CONST_INT_P (XEXP (vec
, 1)))
4657 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
4658 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
4661 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
4663 if (known_lt (offset
, vec_size
))
4664 vec
= XEXP (vec
, 0);
4665 else if (known_ge (offset
, vec_size
))
4668 vec
= XEXP (vec
, 1);
4672 vec
= avoid_constant_pool_reference (vec
);
4675 if (GET_MODE (vec
) == mode
)
4679 /* If we select elements in a vec_merge that all come from the same
4680 operand, select from that operand directly. */
4681 if (GET_CODE (op0
) == VEC_MERGE
)
4683 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
4684 if (CONST_INT_P (trueop02
))
4686 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
4687 bool all_operand0
= true;
4688 bool all_operand1
= true;
4689 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
4691 rtx j
= XVECEXP (trueop1
, 0, i
);
4692 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
4693 all_operand1
= false;
4695 all_operand0
= false;
4697 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
4698 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
4699 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
4700 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
4704 /* If we have two nested selects that are inverses of each
4705 other, replace them with the source operand. */
4706 if (GET_CODE (trueop0
) == VEC_SELECT
4707 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
4709 rtx op0_subop1
= XEXP (trueop0
, 1);
4710 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
4711 gcc_assert (known_eq (XVECLEN (trueop1
, 0), GET_MODE_NUNITS (mode
)));
4713 /* Apply the outer ordering vector to the inner one. (The inner
4714 ordering vector is expressly permitted to be of a different
4715 length than the outer one.) If the result is { 0, 1, ..., n-1 }
4716 then the two VEC_SELECTs cancel. */
4717 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
4719 rtx x
= XVECEXP (trueop1
, 0, i
);
4720 if (!CONST_INT_P (x
))
4722 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
4723 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
4726 return XEXP (trueop0
, 0);
4732 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
4733 ? GET_MODE (trueop0
)
4734 : GET_MODE_INNER (mode
));
4735 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
4736 ? GET_MODE (trueop1
)
4737 : GET_MODE_INNER (mode
));
4739 gcc_assert (VECTOR_MODE_P (mode
));
4740 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode
)
4741 + GET_MODE_SIZE (op1_mode
),
4742 GET_MODE_SIZE (mode
)));
4744 if (VECTOR_MODE_P (op0_mode
))
4745 gcc_assert (GET_MODE_INNER (mode
)
4746 == GET_MODE_INNER (op0_mode
));
4748 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
4750 if (VECTOR_MODE_P (op1_mode
))
4751 gcc_assert (GET_MODE_INNER (mode
)
4752 == GET_MODE_INNER (op1_mode
));
4754 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
4756 unsigned int n_elts
, in_n_elts
;
4757 if ((GET_CODE (trueop0
) == CONST_VECTOR
4758 || CONST_SCALAR_INT_P (trueop0
)
4759 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
4760 && (GET_CODE (trueop1
) == CONST_VECTOR
4761 || CONST_SCALAR_INT_P (trueop1
)
4762 || CONST_DOUBLE_AS_FLOAT_P (trueop1
))
4763 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
)
4764 && GET_MODE_NUNITS (op0_mode
).is_constant (&in_n_elts
))
4766 rtvec v
= rtvec_alloc (n_elts
);
4768 for (i
= 0; i
< n_elts
; i
++)
4772 if (!VECTOR_MODE_P (op0_mode
))
4773 RTVEC_ELT (v
, i
) = trueop0
;
4775 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
4779 if (!VECTOR_MODE_P (op1_mode
))
4780 RTVEC_ELT (v
, i
) = trueop1
;
4782 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
4787 return gen_rtx_CONST_VECTOR (mode
, v
);
4790 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4791 Restrict the transformation to avoid generating a VEC_SELECT with a
4792 mode unrelated to its operand. */
4793 if (GET_CODE (trueop0
) == VEC_SELECT
4794 && GET_CODE (trueop1
) == VEC_SELECT
4795 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
4796 && GET_MODE_INNER (GET_MODE (XEXP (trueop0
, 0)))
4797 == GET_MODE_INNER(mode
))
4799 rtx par0
= XEXP (trueop0
, 1);
4800 rtx par1
= XEXP (trueop1
, 1);
4801 int len0
= XVECLEN (par0
, 0);
4802 int len1
= XVECLEN (par1
, 0);
4803 rtvec vec
= rtvec_alloc (len0
+ len1
);
4804 for (int i
= 0; i
< len0
; i
++)
4805 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
4806 for (int i
= 0; i
< len1
; i
++)
4807 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
4808 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
4809 gen_rtx_PARALLEL (VOIDmode
, vec
));
4818 if (mode
== GET_MODE (op0
)
4819 && mode
== GET_MODE (op1
)
4820 && vec_duplicate_p (op0
, &elt0
)
4821 && vec_duplicate_p (op1
, &elt1
))
4823 /* Try applying the operator to ELT and see if that simplifies.
4824 We can duplicate the result if so.
4826 The reason we don't use simplify_gen_binary is that it isn't
4827 necessarily a win to convert things like:
4829 (plus:V (vec_duplicate:V (reg:S R1))
4830 (vec_duplicate:V (reg:S R2)))
4834 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4836 The first might be done entirely in vector registers while the
4837 second might need a move between register files. */
4838 tem
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4841 return gen_vec_duplicate (mode
, tem
);
4847 /* Return true if binary operation OP distributes over addition in operand
4848 OPNO, with the other operand being held constant. OPNO counts from 1. */
4851 distributes_over_addition_p (rtx_code op
, int opno
)
4869 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
4872 if (VECTOR_MODE_P (mode
)
4873 && code
!= VEC_CONCAT
4874 && GET_CODE (op0
) == CONST_VECTOR
4875 && GET_CODE (op1
) == CONST_VECTOR
)
4878 if (CONST_VECTOR_STEPPED_P (op0
)
4879 && CONST_VECTOR_STEPPED_P (op1
))
4880 /* We can operate directly on the encoding if:
4882 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4884 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4886 Addition and subtraction are the supported operators
4887 for which this is true. */
4888 step_ok_p
= (code
== PLUS
|| code
== MINUS
);
4889 else if (CONST_VECTOR_STEPPED_P (op0
))
4890 /* We can operate directly on stepped encodings if:
4894 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4896 which is true if (x -> x op c) distributes over addition. */
4897 step_ok_p
= distributes_over_addition_p (code
, 1);
4899 /* Similarly in reverse. */
4900 step_ok_p
= distributes_over_addition_p (code
, 2);
4901 rtx_vector_builder builder
;
4902 if (!builder
.new_binary_operation (mode
, op0
, op1
, step_ok_p
))
4905 unsigned int count
= builder
.encoded_nelts ();
4906 for (unsigned int i
= 0; i
< count
; i
++)
4908 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4909 CONST_VECTOR_ELT (op0
, i
),
4910 CONST_VECTOR_ELT (op1
, i
));
4911 if (!x
|| !valid_for_const_vector_p (mode
, x
))
4913 builder
.quick_push (x
);
4915 return builder
.build ();
4918 if (VECTOR_MODE_P (mode
)
4919 && code
== VEC_CONCAT
4920 && (CONST_SCALAR_INT_P (op0
)
4921 || CONST_FIXED_P (op0
)
4922 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4923 && (CONST_SCALAR_INT_P (op1
)
4924 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4925 || CONST_FIXED_P (op1
)))
4927 /* Both inputs have a constant number of elements, so the result
4929 unsigned n_elts
= GET_MODE_NUNITS (mode
).to_constant ();
4930 rtvec v
= rtvec_alloc (n_elts
);
4932 gcc_assert (n_elts
>= 2);
4935 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4936 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4938 RTVEC_ELT (v
, 0) = op0
;
4939 RTVEC_ELT (v
, 1) = op1
;
4943 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
)).to_constant ();
4944 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
)).to_constant ();
4947 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4948 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4949 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4951 for (i
= 0; i
< op0_n_elts
; ++i
)
4952 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op0
, i
);
4953 for (i
= 0; i
< op1_n_elts
; ++i
)
4954 RTVEC_ELT (v
, op0_n_elts
+i
) = CONST_VECTOR_ELT (op1
, i
);
4957 return gen_rtx_CONST_VECTOR (mode
, v
);
4960 if (SCALAR_FLOAT_MODE_P (mode
)
4961 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4962 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4963 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4974 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4976 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4978 for (i
= 0; i
< 4; i
++)
4995 real_from_target (&r
, tmp0
, mode
);
4996 return const_double_from_real_value (r
, mode
);
5000 REAL_VALUE_TYPE f0
, f1
, value
, result
;
5001 const REAL_VALUE_TYPE
*opr0
, *opr1
;
5004 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
5005 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
5007 if (HONOR_SNANS (mode
)
5008 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
5009 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
5012 real_convert (&f0
, mode
, opr0
);
5013 real_convert (&f1
, mode
, opr1
);
5016 && real_equal (&f1
, &dconst0
)
5017 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
5020 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
5021 && flag_trapping_math
5022 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
5024 int s0
= REAL_VALUE_NEGATIVE (f0
);
5025 int s1
= REAL_VALUE_NEGATIVE (f1
);
5030 /* Inf + -Inf = NaN plus exception. */
5035 /* Inf - Inf = NaN plus exception. */
5040 /* Inf / Inf = NaN plus exception. */
5047 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
5048 && flag_trapping_math
5049 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
5050 || (REAL_VALUE_ISINF (f1
)
5051 && real_equal (&f0
, &dconst0
))))
5052 /* Inf * 0 = NaN plus exception. */
5055 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
5057 real_convert (&result
, mode
, &value
);
5059 /* Don't constant fold this floating point operation if
5060 the result has overflowed and flag_trapping_math. */
5062 if (flag_trapping_math
5063 && MODE_HAS_INFINITIES (mode
)
5064 && REAL_VALUE_ISINF (result
)
5065 && !REAL_VALUE_ISINF (f0
)
5066 && !REAL_VALUE_ISINF (f1
))
5067 /* Overflow plus exception. */
5070 /* Don't constant fold this floating point operation if the
5071 result may depend upon the run-time rounding mode and
5072 flag_rounding_math is set, or if GCC's software emulation
5073 is unable to accurately represent the result. */
5075 if ((flag_rounding_math
5076 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
5077 && (inexact
|| !real_identical (&result
, &value
)))
5080 return const_double_from_real_value (result
, mode
);
5084 /* We can fold some multi-word operations. */
5085 scalar_int_mode int_mode
;
5086 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
5087 && CONST_SCALAR_INT_P (op0
)
5088 && CONST_SCALAR_INT_P (op1
)
5089 && GET_MODE_PRECISION (int_mode
) <= MAX_BITSIZE_MODE_ANY_INT
)
5092 wi::overflow_type overflow
;
5093 rtx_mode_t pop0
= rtx_mode_t (op0
, int_mode
);
5094 rtx_mode_t pop1
= rtx_mode_t (op1
, int_mode
);
5096 #if TARGET_SUPPORTS_WIDE_INT == 0
5097 /* This assert keeps the simplification from producing a result
5098 that cannot be represented in a CONST_DOUBLE but a lot of
5099 upstream callers expect that this function never fails to
5100 simplify something, so if you added this to the test
5101 above, the code would die later anyway. If this assert
5102 happens, you just need to make the port support wide int. */
5103 gcc_assert (GET_MODE_PRECISION (int_mode
) <= HOST_BITS_PER_DOUBLE_INT
);
5108 result
= wi::sub (pop0
, pop1
);
5112 result
= wi::add (pop0
, pop1
);
5116 result
= wi::mul (pop0
, pop1
);
5120 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
5126 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
5132 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
5138 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
5144 result
= wi::bit_and (pop0
, pop1
);
5148 result
= wi::bit_or (pop0
, pop1
);
5152 result
= wi::bit_xor (pop0
, pop1
);
5156 result
= wi::smin (pop0
, pop1
);
5160 result
= wi::smax (pop0
, pop1
);
5164 result
= wi::umin (pop0
, pop1
);
5168 result
= wi::umax (pop0
, pop1
);
5177 /* The shift count might be in SImode while int_mode might
5178 be narrower. On IA-64 it is even DImode. If the shift
5179 count is too large and doesn't fit into int_mode, we'd
5180 ICE. So, if int_mode is narrower than word, use
5181 word_mode for the shift count. */
5182 if (GET_MODE (op1
) == VOIDmode
5183 && GET_MODE_PRECISION (int_mode
) < BITS_PER_WORD
)
5184 pop1
= rtx_mode_t (op1
, word_mode
);
5186 wide_int wop1
= pop1
;
5187 if (SHIFT_COUNT_TRUNCATED
)
5188 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
5189 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
5195 result
= wi::lrshift (pop0
, wop1
);
5199 result
= wi::arshift (pop0
, wop1
);
5203 result
= wi::lshift (pop0
, wop1
);
5207 if (wi::leu_p (wop1
, wi::clrsb (pop0
)))
5208 result
= wi::lshift (pop0
, wop1
);
5209 else if (wi::neg_p (pop0
))
5210 result
= wi::min_value (int_mode
, SIGNED
);
5212 result
= wi::max_value (int_mode
, SIGNED
);
5216 if (wi::eq_p (pop0
, 0))
5218 else if (wi::leu_p (wop1
, wi::clz (pop0
)))
5219 result
= wi::lshift (pop0
, wop1
);
5221 result
= wi::max_value (int_mode
, UNSIGNED
);
5232 /* The rotate count might be in SImode while int_mode might
5233 be narrower. On IA-64 it is even DImode. If the shift
5234 count is too large and doesn't fit into int_mode, we'd
5235 ICE. So, if int_mode is narrower than word, use
5236 word_mode for the shift count. */
5237 if (GET_MODE (op1
) == VOIDmode
5238 && GET_MODE_PRECISION (int_mode
) < BITS_PER_WORD
)
5239 pop1
= rtx_mode_t (op1
, word_mode
);
5241 if (wi::neg_p (pop1
))
5247 result
= wi::lrotate (pop0
, pop1
);
5251 result
= wi::rrotate (pop0
, pop1
);
5261 result
= wi::add (pop0
, pop1
, SIGNED
, &overflow
);
5262 clamp_signed_saturation
:
5263 if (overflow
== wi::OVF_OVERFLOW
)
5264 result
= wi::max_value (GET_MODE_PRECISION (int_mode
), SIGNED
);
5265 else if (overflow
== wi::OVF_UNDERFLOW
)
5266 result
= wi::min_value (GET_MODE_PRECISION (int_mode
), SIGNED
);
5267 else if (overflow
!= wi::OVF_NONE
)
5272 result
= wi::add (pop0
, pop1
, UNSIGNED
, &overflow
);
5273 clamp_unsigned_saturation
:
5274 if (overflow
!= wi::OVF_NONE
)
5275 result
= wi::max_value (GET_MODE_PRECISION (int_mode
), UNSIGNED
);
5279 result
= wi::sub (pop0
, pop1
, SIGNED
, &overflow
);
5280 goto clamp_signed_saturation
;
5283 result
= wi::sub (pop0
, pop1
, UNSIGNED
, &overflow
);
5284 if (overflow
!= wi::OVF_NONE
)
5285 result
= wi::min_value (GET_MODE_PRECISION (int_mode
), UNSIGNED
);
5289 result
= wi::mul (pop0
, pop1
, SIGNED
, &overflow
);
5290 goto clamp_signed_saturation
;
5293 result
= wi::mul (pop0
, pop1
, UNSIGNED
, &overflow
);
5294 goto clamp_unsigned_saturation
;
5297 result
= wi::mul_high (pop0
, pop1
, SIGNED
);
5301 result
= wi::mul_high (pop0
, pop1
, UNSIGNED
);
5307 return immed_wide_int_const (result
, int_mode
);
5310 /* Handle polynomial integers. */
5311 if (NUM_POLY_INT_COEFFS
> 1
5312 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5313 && poly_int_rtx_p (op0
)
5314 && poly_int_rtx_p (op1
))
5316 poly_wide_int result
;
5320 result
= wi::to_poly_wide (op0
, mode
) + wi::to_poly_wide (op1
, mode
);
5324 result
= wi::to_poly_wide (op0
, mode
) - wi::to_poly_wide (op1
, mode
);
5328 if (CONST_SCALAR_INT_P (op1
))
5329 result
= wi::to_poly_wide (op0
, mode
) * rtx_mode_t (op1
, mode
);
5335 if (CONST_SCALAR_INT_P (op1
))
5339 GET_MODE (op1
) == VOIDmode
5340 && GET_MODE_PRECISION (int_mode
) < BITS_PER_WORD
5341 ? word_mode
: mode
);
5342 if (SHIFT_COUNT_TRUNCATED
)
5343 shift
= wi::umod_trunc (shift
, GET_MODE_PRECISION (int_mode
));
5344 else if (wi::geu_p (shift
, GET_MODE_PRECISION (int_mode
)))
5346 result
= wi::to_poly_wide (op0
, mode
) << shift
;
5353 if (!CONST_SCALAR_INT_P (op1
)
5354 || !can_ior_p (wi::to_poly_wide (op0
, mode
),
5355 rtx_mode_t (op1
, mode
), &result
))
5362 return immed_wide_int_const (result
, int_mode
);
5370 /* Return a positive integer if X should sort after Y. The value
5371    returned is 1 if and only if X and Y are both regs. */
5374 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
5378 result = (commutative_operand_precedence (y)
5379           - commutative_operand_precedence (x));
5381 return result + result;
5383 /* Group together equal REGs to do more simplification. */
5384 if (REG_P (x) && REG_P (y))
5385 return REGNO (x) > REGNO (y);
5390 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
5391 operands may be another PLUS or MINUS.
5393 Rather than test for specific case, we do this by a brute-force method
5394 and do all possible simplifications until no more changes occur. Then
5395 we rebuild the operation.
5397 May return NULL_RTX when no changes were made. */
5400 simplify_context::simplify_plus_minus (rtx_code code
, machine_mode mode
,
5403 struct simplify_plus_minus_op_data
5410 int changed
, n_constants
, canonicalized
= 0;
5413 memset (ops
, 0, sizeof ops
);
5415 /* Set up the two operands and then expand them until nothing has been
5416 changed. If we run out of room in our array, give up; this should
5417 almost never happen. */
5422 ops
[1].neg
= (code
== MINUS
);
5429 for (i
= 0; i
< n_ops
; i
++)
5431 rtx this_op
= ops
[i
].op
;
5432 int this_neg
= ops
[i
].neg
;
5433 enum rtx_code this_code
= GET_CODE (this_op
);
5439 if (n_ops
== ARRAY_SIZE (ops
))
5442 ops
[n_ops
].op
= XEXP (this_op
, 1);
5443 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
5446 ops
[i
].op
= XEXP (this_op
, 0);
5448 /* If this operand was negated then we will potentially
5449 canonicalize the expression. Similarly if we don't
5450 place the operands adjacent we're re-ordering the
5451 expression and thus might be performing a
5452 canonicalization. Ignore register re-ordering.
5453 ??? It might be better to shuffle the ops array here,
5454 but then (plus (plus (A, B), plus (C, D))) wouldn't
5455 be seen as non-canonical. */
5458 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
5463 ops
[i
].op
= XEXP (this_op
, 0);
5464 ops
[i
].neg
= ! this_neg
;
5470 if (n_ops
!= ARRAY_SIZE (ops
)
5471 && GET_CODE (XEXP (this_op
, 0)) == PLUS
5472 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
5473 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
5475 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
5476 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
5477 ops
[n_ops
].neg
= this_neg
;
5485 /* ~a -> (-a - 1) */
5486 if (n_ops
!= ARRAY_SIZE (ops
))
5488 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
5489 ops
[n_ops
++].neg
= this_neg
;
5490 ops
[i
].op
= XEXP (this_op
, 0);
5491 ops
[i
].neg
= !this_neg
;
5497 CASE_CONST_SCALAR_INT
:
5498 case CONST_POLY_INT
:
5502 ops
[i
].op
= neg_poly_int_rtx (mode
, this_op
);
5516 if (n_constants
> 1)
5519 gcc_assert (n_ops
>= 2);
5521 /* If we only have two operands, we can avoid the loops. */
5524 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
5527 /* Get the two operands. Be careful with the order, especially for
5528 the cases where code == MINUS. */
5529 if (ops
[0].neg
&& ops
[1].neg
)
5531 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
5534 else if (ops
[0].neg
)
5545 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
5548 /* Now simplify each pair of operands until nothing changes. */
5551 /* Insertion sort is good enough for a small array. */
5552 for (i
= 1; i
< n_ops
; i
++)
5554 struct simplify_plus_minus_op_data save
;
5558 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
5561 /* Just swapping registers doesn't count as canonicalization. */
5567 ops
[j
+ 1] = ops
[j
];
5569 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
5574 for (i
= n_ops
- 1; i
> 0; i
--)
5575 for (j
= i
- 1; j
>= 0; j
--)
5577 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
5578 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
5580 if (lhs
!= 0 && rhs
!= 0)
5582 enum rtx_code ncode
= PLUS
;
5588 std::swap (lhs
, rhs
);
5590 else if (swap_commutative_operands_p (lhs
, rhs
))
5591 std::swap (lhs
, rhs
);
5593 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
5594 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
5596 rtx tem_lhs
, tem_rhs
;
5598 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
5599 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
5600 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
5603 if (tem
&& !CONSTANT_P (tem
))
5604 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
5607 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
5611 /* Reject "simplifications" that just wrap the two
5612 arguments in a CONST. Failure to do so can result
5613 in infinite recursion with simplify_binary_operation
5614 when it calls us to simplify CONST operations.
5615 Also, if we find such a simplification, don't try
5616 any more combinations with this rhs: We must have
5617 something like symbol+offset, ie. one of the
5618 trivial CONST expressions we handle later. */
5619 if (GET_CODE (tem
) == CONST
5620 && GET_CODE (XEXP (tem
, 0)) == ncode
5621 && XEXP (XEXP (tem
, 0), 0) == lhs
5622 && XEXP (XEXP (tem
, 0), 1) == rhs
)
5625 if (GET_CODE (tem
) == NEG
)
5626 tem
= XEXP (tem
, 0), lneg
= !lneg
;
5627 if (poly_int_rtx_p (tem
) && lneg
)
5628 tem
= neg_poly_int_rtx (mode
, tem
), lneg
= 0;
5632 ops
[j
].op
= NULL_RTX
;
5642 /* Pack all the operands to the lower-numbered entries. */
5643 for (i
= 0, j
= 0; j
< n_ops
; j
++)
5652 /* If nothing changed, check that rematerialization of rtl instructions
5653 is still required. */
5656 /* Perform rematerialization if only all operands are registers and
5657 all operations are PLUS. */
5658 /* ??? Also disallow (non-global, non-frame) fixed registers to work
5659 around rs6000 and how it uses the CA register. See PR67145. */
5660 for (i
= 0; i
< n_ops
; i
++)
5662 || !REG_P (ops
[i
].op
)
5663 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
5664 && fixed_regs
[REGNO (ops
[i
].op
)]
5665 && !global_regs
[REGNO (ops
[i
].op
)]
5666 && ops
[i
].op
!= frame_pointer_rtx
5667 && ops
[i
].op
!= arg_pointer_rtx
5668 && ops
[i
].op
!= stack_pointer_rtx
))
5673 /* Create (minus -C X) instead of (neg (const (plus X C))). */
5675 && CONST_INT_P (ops
[1].op
)
5676 && CONSTANT_P (ops
[0].op
)
5678 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
5680 /* We suppressed creation of trivial CONST expressions in the
5681 combination loop to avoid recursion. Create one manually now.
5682 The combination loop should have ensured that there is exactly
5683 one CONST_INT, and the sort will have ensured that it is last
5684 in the array and that any other constant will be next-to-last. */
5687 && poly_int_rtx_p (ops
[n_ops
- 1].op
)
5688 && CONSTANT_P (ops
[n_ops
- 2].op
))
5690 rtx value
= ops
[n_ops
- 1].op
;
5691 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
5692 value
= neg_poly_int_rtx (mode
, value
);
5693 if (CONST_INT_P (value
))
5695 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
5701 /* Put a non-negated operand first, if possible. */
5703 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
5706 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
5715 /* Now make the result by performing the requested operations. */
5718 for (i
= 1; i
< n_ops
; i
++)
5719 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
5720 mode
, result
, ops
[i
].op
);
5725 /* Check whether an operand is suitable for calling simplify_plus_minus. */
5727 plus_minus_operand_p (const_rtx x)
5729 return GET_CODE (x) == PLUS
5730        || GET_CODE (x) == MINUS
5731        || (GET_CODE (x) == CONST
5732            && GET_CODE (XEXP (x, 0)) == PLUS
5733            && CONSTANT_P (XEXP (XEXP (x, 0), 0))
5734            && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
5737 /* Like simplify_binary_operation except used for relational operators.
5738 MODE is the mode of the result. If MODE is VOIDmode, both operands must
5739 not also be VOIDmode.
5741 CMP_MODE specifies the mode in which the comparison is done, so it is
5742 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
5743 the operands or, if both are VOIDmode, the operands are compared in
5744 "infinite precision". */
5746 simplify_context::simplify_relational_operation (rtx_code code
,
5748 machine_mode cmp_mode
,
5751 rtx tem
, trueop0
, trueop1
;
5753 if (cmp_mode
== VOIDmode
)
5754 cmp_mode
= GET_MODE (op0
);
5755 if (cmp_mode
== VOIDmode
)
5756 cmp_mode
= GET_MODE (op1
);
5758 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
5760 return relational_result (mode
, cmp_mode
, tem
);
5762 /* For the following tests, ensure const0_rtx is op1. */
5763 if (swap_commutative_operands_p (op0
, op1
)
5764 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
5765 std::swap (op0
, op1
), code
= swap_condition (code
);
5767 /* If op0 is a compare, extract the comparison arguments from it. */
5768 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5769 return simplify_gen_relational (code
, mode
, VOIDmode
,
5770 XEXP (op0
, 0), XEXP (op0
, 1));
5772 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
)
5775 trueop0
= avoid_constant_pool_reference (op0
);
5776 trueop1
= avoid_constant_pool_reference (op1
);
5777 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
5781 /* This part of simplify_relational_operation is only used when CMP_MODE
5782 is not in class MODE_CC (i.e. it is a real comparison).
5784 MODE is the mode of the result, while CMP_MODE specifies in which
5785 mode the comparison is done in, so it is the mode of the operands. */
5788 simplify_context::simplify_relational_operation_1 (rtx_code code
,
5790 machine_mode cmp_mode
,
5793 enum rtx_code op0code
= GET_CODE (op0
);
5795 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
5797 /* If op0 is a comparison, extract the comparison arguments
5801 if (GET_MODE (op0
) == mode
)
5802 return simplify_rtx (op0
);
5804 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
5805 XEXP (op0
, 0), XEXP (op0
, 1));
5807 else if (code
== EQ
)
5809 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
5810 if (new_code
!= UNKNOWN
)
5811 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
5812 XEXP (op0
, 0), XEXP (op0
, 1));
5816 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5817    (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
5818 if ((code == LTU || code == GEU)
5819     && GET_CODE (op0) == PLUS
5820     && CONST_INT_P (XEXP (op0, 1))
5821     && (rtx_equal_p (op1, XEXP (op0, 0))
5822         || rtx_equal_p (op1, XEXP (op0, 1)))
5823     /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5824     && XEXP (op0, 1) != const0_rtx)
5827 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5828 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
5829                                 cmp_mode, XEXP (op0, 0), new_cmp);
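/* Editor's sketch (not part of simplify-rtx.cc): the unsigned overflow-check
   identity behind the rewrite above, in 32-bit wrap-around arithmetic and
   for a nonzero constant C (the C == 0 case is excluded by the code).  The
   check() helper is invented for the example.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
check (uint32_t a, uint32_t c)
{
  int ltu_form = (uint32_t) (a + c) < c;   /* (LTU (PLUS a C) C) */
  int geu_form = a >= (uint32_t) -c;       /* (GEU a (NEG C))    */
  assert (ltu_form == geu_form);
}

int
main (void)
{
  check (0u, 5u);
  check (0xfffffffbu, 5u);   /* a + c just wraps around */
  check (0xfffffffau, 5u);   /* a + c == UINT32_MAX, no wrap */
  check (0x80000000u, 0x80000000u);
  return 0;
}
#endif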
5832 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5833 transformed into (LTU a -C). */
5834 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
5835 && CONST_INT_P (XEXP (op0
, 1))
5836 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
5837 && XEXP (op0
, 1) != const0_rtx
)
5840 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
5841 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
5842 XEXP (op0
, 0), new_cmp
);
5845 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5846 if ((code
== LTU
|| code
== GEU
)
5847 && GET_CODE (op0
) == PLUS
5848 && rtx_equal_p (op1
, XEXP (op0
, 1))
5849 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5850 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
5851 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
5852 copy_rtx (XEXP (op0
, 0)));
5854 if (op1 == const0_rtx)
5856 /* Canonicalize (GTU x 0) as (NE x 0). */
5858 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
5859 /* Canonicalize (LEU x 0) as (EQ x 0). */
5861 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
5863 else if (op1 == const1_rtx)
5868 /* Canonicalize (GE x 1) as (GT x 0). */
5869 return simplify_gen_relational (GT, mode, cmp_mode,
5872 /* Canonicalize (GEU x 1) as (NE x 0). */
5873 return simplify_gen_relational (NE, mode, cmp_mode,
5876 /* Canonicalize (LT x 1) as (LE x 0). */
5877 return simplify_gen_relational (LE, mode, cmp_mode,
5880 /* Canonicalize (LTU x 1) as (EQ x 0). */
5881 return simplify_gen_relational (EQ, mode, cmp_mode,
5887 else if (op1 == constm1_rtx)
5889 /* Canonicalize (LE x -1) as (LT x 0). */
5891 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5892 /* Canonicalize (GT x -1) as (GE x 0). */
5894 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
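/* Editor's sketch (not part of simplify-rtx.cc): the canonicalizations above
   are ordinary integer identities; this checks them over a small range of
   two's-complement values.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  for (int32_t s = -3; s <= 3; s++)
    {
      uint32_t u = (uint32_t) s;
      /* Unsigned comparisons against 0 and 1.  */
      assert ((u > 0u)  == (u != 0u));   /* GTU x 0  ->  NE x 0 */
      assert ((u <= 0u) == (u == 0u));   /* LEU x 0  ->  EQ x 0 */
      assert ((u >= 1u) == (u != 0u));   /* GEU x 1  ->  NE x 0 */
      assert ((u < 1u)  == (u == 0u));   /* LTU x 1  ->  EQ x 0 */
      /* Signed comparisons against 1 and -1.  */
      assert ((s >= 1)  == (s > 0));     /* GE x 1   ->  GT x 0 */
      assert ((s < 1)   == (s <= 0));    /* LT x 1   ->  LE x 0 */
      assert ((s <= -1) == (s < 0));     /* LE x -1  ->  LT x 0 */
      assert ((s > -1)  == (s >= 0));    /* GT x -1  ->  GE x 0 */
    }
  return 0;
}
#endif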
5897 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5898 if ((code == EQ || code == NE)
5899     && (op0code == PLUS || op0code == MINUS)
5901     && CONSTANT_P (XEXP (op0, 1))
5902     && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5904 rtx x = XEXP (op0, 0);
5905 rtx c = XEXP (op0, 1);
5906 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5907 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5909 /* Detect an infinite recursive condition, where we oscillate at this
5910    simplification case between:
5911    A + B == C <---> C - B == A,
5912    where A, B, and C are all constants with non-simplifiable expressions,
5913    usually SYMBOL_REFs. */
5914 if (GET_CODE (tem) == invcode
5916     && rtx_equal_p (c, XEXP (tem, 1)))
5919 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
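/* Editor's sketch (not part of simplify-rtx.cc): in wrap-around integer
   arithmetic, (x + c1) == c2 holds exactly when x == (c2 - c1), so moving
   the constant to the other side preserves the comparison.  32-bit unsigned
   values are assumed and the check() helper is invented for the example.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
check (uint32_t x, uint32_t c1, uint32_t c2)
{
  int lhs = (uint32_t) (x + c1) == c2;      /* (eq (plus x c1) c2)   */
  int rhs = x == (uint32_t) (c2 - c1);      /* (eq x (minus c2 c1))  */
  assert (lhs == rhs);
}

int
main (void)
{
  check (7u, 3u, 10u);           /* 7 + 3 == 10 */
  check (7u, 3u, 11u);           /* inequality is preserved too */
  check (0xfffffffeu, 5u, 3u);   /* the addition wraps around */
  return 0;
}
#endif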
5922 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
5923 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5924 scalar_int_mode int_mode
, int_cmp_mode
;
5926 && op1
== const0_rtx
5927 && is_int_mode (mode
, &int_mode
)
5928 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
5929 /* ??? Work-around BImode bugs in the ia64 backend. */
5930 && int_mode
!= BImode
5931 && int_cmp_mode
!= BImode
5932 && nonzero_bits (op0
, int_cmp_mode
) == 1
5933 && STORE_FLAG_VALUE
== 1)
5934 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
5935 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
5936 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
5938 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5939 if ((code
== EQ
|| code
== NE
)
5940 && op1
== const0_rtx
5942 return simplify_gen_relational (code
, mode
, cmp_mode
,
5943 XEXP (op0
, 0), XEXP (op0
, 1));
5945 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5946 if ((code
== EQ
|| code
== NE
)
5948 && rtx_equal_p (XEXP (op0
, 0), op1
)
5949 && !side_effects_p (XEXP (op0
, 0)))
5950 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
5953 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5954 if ((code
== EQ
|| code
== NE
)
5956 && rtx_equal_p (XEXP (op0
, 1), op1
)
5957 && !side_effects_p (XEXP (op0
, 1)))
5958 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5961 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5962 if ((code
== EQ
|| code
== NE
)
5964 && CONST_SCALAR_INT_P (op1
)
5965 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
5966 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5967 simplify_gen_binary (XOR
, cmp_mode
,
5968 XEXP (op0
, 1), op1
));
5970 /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5971 constant folding if x/y is a constant. */
5972 if ((code
== EQ
|| code
== NE
)
5973 && (op0code
== AND
|| op0code
== IOR
)
5974 && !side_effects_p (op1
)
5975 && op1
!= CONST0_RTX (cmp_mode
))
5977 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5978 (eq/ne (and (not y) x) 0). */
5979 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 0), op1
))
5980 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 1), op1
)))
5982 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1),
5984 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
5986 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5987 CONST0_RTX (cmp_mode
));
5990 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5991 (eq/ne (and (not x) y) 0). */
5992 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 1), op1
))
5993 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 0), op1
)))
5995 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0),
5997 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
5999 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
6000 CONST0_RTX (cmp_mode
));
6004 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
6005 if ((code
== EQ
|| code
== NE
)
6006 && GET_CODE (op0
) == BSWAP
6007 && CONST_SCALAR_INT_P (op1
))
6008 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
6009 simplify_gen_unary (BSWAP
, cmp_mode
,
6012 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
6013 if ((code
== EQ
|| code
== NE
)
6014 && GET_CODE (op0
) == BSWAP
6015 && GET_CODE (op1
) == BSWAP
)
6016 return simplify_gen_relational (code
, mode
, cmp_mode
,
6017 XEXP (op0
, 0), XEXP (op1
, 0));
6019 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
6025 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
6026 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
6027 XEXP (op0
, 0), const0_rtx
);
6032 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
6033 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
6034 XEXP (op0
, 0), const0_rtx
);
6053 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
6054 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
6055 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
6056 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
6057 For floating-point comparisons, assume that the operands were ordered. */
6060 comparison_result (enum rtx_code code
, int known_results
)
6066 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
6069 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
6073 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
6076 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
6080 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
6083 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
6086 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
6088 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
6091 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
6093 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
6096 return const_true_rtx
;
6104 /* Check if the given comparison (done in the given MODE) is actually
6105 a tautology or a contradiction. If the mode is VOIDmode, the
6106 comparison is done in "infinite precision". If no simplification
6107 is possible, this function returns zero. Otherwise, it returns
6108 either const_true_rtx or const0_rtx. */
6111 simplify_const_relational_operation (enum rtx_code code
,
6119 gcc_assert (mode
!= VOIDmode
6120 || (GET_MODE (op0
) == VOIDmode
6121 && GET_MODE (op1
) == VOIDmode
));
6123 /* If op0 is a compare, extract the comparison arguments from it. */
6124 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
6126 op1
= XEXP (op0
, 1);
6127 op0
= XEXP (op0
, 0);
6129 if (GET_MODE (op0
) != VOIDmode
)
6130 mode
= GET_MODE (op0
);
6131 else if (GET_MODE (op1
) != VOIDmode
)
6132 mode
= GET_MODE (op1
);
6137 /* We can't simplify MODE_CC values since we don't know what the
6138 actual comparison is. */
6139 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
)
6142 /* Make sure the constant is second. */
6143 if (swap_commutative_operands_p (op0
, op1
))
6145 std::swap (op0
, op1
);
6146 code
= swap_condition (code
);
6149 trueop0
= avoid_constant_pool_reference (op0
);
6150 trueop1
= avoid_constant_pool_reference (op1
);
6152 /* For integer comparisons of A and B maybe we can simplify A - B and can
6153 then simplify a comparison of that with zero. If A and B are both either
6154 a register or a CONST_INT, this can't help; testing for these cases will
6155 prevent infinite recursion here and speed things up.
6157 We can only do this for EQ and NE comparisons as otherwise we may
6158 lose or introduce overflow which we cannot disregard as undefined as
6159 we do not know the signedness of the operation on either the left or
6160 the right hand side of the comparison. */
6162 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
6163 && (code
== EQ
|| code
== NE
)
6164 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
6165 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
6166 && (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
)) != 0
6167 /* We cannot do this if tem is a nonzero address. */
6168 && ! nonzero_address_p (tem
))
6169 return simplify_const_relational_operation (signed_condition (code
),
6170 mode
, tem
, const0_rtx
);
6172 if (! HONOR_NANS (mode
) && code
== ORDERED
)
6173 return const_true_rtx
;
  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (real_equal (d0, d1) ? CMP_EQ :
                                 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
         largest int representable on the target is as good as
         infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
      rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
          cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
  /* Optimize comparisons with upper and lower bounds.  */
  scalar_int_mode int_mode;
  if (CONST_INT_P (trueop1)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && HWI_COMPUTABLE_MODE_P (int_mode)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies
                = num_sign_bit_copies (trueop0, int_mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
  /* Optimize integer comparisons with zero.  */
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && trueop1 == const0_rtx
      && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & (HOST_WIDE_INT_1U << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
            return const0_rtx;
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
            return const_true_rtx;
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
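
/* Usage sketch (illustrative only, not part of GCC): decidable constant
   comparisons fold to const_true_rtx or const0_rtx, and undecidable ones
   return zero.  Kept out of the build; it only documents the contract.  */
#if 0
static void
sketch_const_relational_example (void)
{
  /* (ltu (const_int 7) (const_int 9)) is a tautology.  */
  gcc_assert (simplify_const_relational_operation (LTU, SImode,
                                                   GEN_INT (7), GEN_INT (9))
              == const_true_rtx);
  /* (gt (const_int -1) (const_int 0)) is a contradiction.  */
  gcc_assert (simplify_const_relational_operation (GT, SImode,
                                                   constm1_rtx, const0_rtx)
              == const0_rtx);
  /* Comparing a fresh pseudo against zero cannot be decided.  */
  rtx reg = gen_rtx_REG (SImode, LAST_VIRTUAL_REGISTER + 1);
  gcc_assert (simplify_const_relational_operation (EQ, SImode, reg,
                                                   const0_rtx) == 0);
}
#endif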
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
   can be simplified to that or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */

rtx
simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
                                         rtx true_val, rtx false_val)
{
  if (cmp_code != EQ && cmp_code != NE)
    return NULL_RTX;

  /* Result on X == 0 and X !=0 respectively.  */
  rtx on_zero, on_nonzero;
  if (cmp_code == EQ)
    {
      on_zero = true_val;
      on_nonzero = false_val;
    }
  else
    {
      on_zero = false_val;
      on_nonzero = true_val;
    }

  rtx_code op_code = GET_CODE (on_nonzero);
  if ((op_code != CLZ && op_code != CTZ)
      || !rtx_equal_p (XEXP (on_nonzero, 0), x)
      || !CONST_INT_P (on_zero))
    return NULL_RTX;

  HOST_WIDE_INT op_val;
  scalar_int_mode mode ATTRIBUTE_UNUSED
    = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
  if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
       || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
      && op_val == INTVAL (on_zero))
    return on_nonzero;

  return NULL_RTX;
}
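
/* Worked example (illustrative only, not part of GCC): on a target where
   CLZ_DEFINED_VALUE_AT_ZERO (SImode, val) is nonzero and sets VAL to 32,

     (if_then_else (eq (reg:SI x) (const_int 0))
                   (const_int 32)
                   (clz:SI (reg:SI x)))

   satisfies the conditions above, so the IF_THEN_ELSE handling below can
   replace the whole expression with (clz:SI (reg:SI x)).  If the constant
   arm does not match the defined value at zero, NULL_RTX is returned and
   the conditional is kept.  */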
/* Try to simplify X given that it appears within operand OP of a
   VEC_MERGE operation whose mask is MASK.  X need not use the same
   vector mode as the VEC_MERGE, but it must have the same number of
   elements.

   Return the simplified X on success, otherwise return NULL_RTX.  */

rtx
simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
{
  gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
  poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
  if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
    {
      if (side_effects_p (XEXP (x, 1 - op)))
        return NULL_RTX;

      return XEXP (x, op);
    }
  if (UNARY_P (x)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      if (top0)
        return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
                                   GET_MODE (XEXP (x, 0)));
    }
  if (BINARY_P (x)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
      if (top0 || top1)
        {
          if (COMPARISON_P (x))
            return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
                                            GET_MODE (XEXP (x, 0)) != VOIDmode
                                            ? GET_MODE (XEXP (x, 0))
                                            : GET_MODE (XEXP (x, 1)),
                                            top0 ? top0 : XEXP (x, 0),
                                            top1 ? top1 : XEXP (x, 1));
          else
            return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
                                        top0 ? top0 : XEXP (x, 0),
                                        top1 ? top1 : XEXP (x, 1));
        }
    }
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
      rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
      if (top0 || top1 || top2)
        return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
                                     GET_MODE (XEXP (x, 0)),
                                     top0 ? top0 : XEXP (x, 0),
                                     top1 ? top1 : XEXP (x, 1),
                                     top2 ? top2 : XEXP (x, 2));
    }
  return NULL_RTX;
}
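
/* Worked example (illustrative only, not part of GCC): with MASK equal to
   the outer VEC_MERGE's mask and OP == 0,

     (plus:V4SI (vec_merge:V4SI (reg:V4SI a) (reg:V4SI b) (const_int 3))
                (reg:V4SI c))

   simplifies to (plus:V4SI (reg:V4SI a) (reg:V4SI c)): in the lanes that
   the outer VEC_MERGE takes from this operand, the inner VEC_MERGE with
   the same mask always selects operand 0, so operand 1 of the inner
   VEC_MERGE is dead here.  */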
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.  */

rtx
simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
                                              machine_mode op0_mode,
                                              rtx op0, rtx op1, rtx op2)
{
  bool any_change = false;
  rtx tem, trueop2;
  scalar_int_mode int_mode, int_op0_mode;
  unsigned int n_elts;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        std::swap (op0, op1), any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      break;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
          && HWI_COMPUTABLE_MODE_P (int_mode))
        {
          /* Extracting a bit-field from a constant */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (!BITS_BIG_ENDIAN)
            val >>= op2val;
          else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
            val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
          else
            /* Not enough information to calculate the bit position.  */
            break;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= (HOST_WIDE_INT_1U << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
                     != 0)
                val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
            }

          return gen_int_mode (val, int_mode);
        }
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;
6654 /* Convert a != b ? a : b into "a". */
6655 if (GET_CODE (op0
) == NE
6656 && ! side_effects_p (op0
)
6657 && ! HONOR_NANS (mode
)
6658 && ! HONOR_SIGNED_ZEROS (mode
)
6659 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
6660 && rtx_equal_p (XEXP (op0
, 1), op2
))
6661 || (rtx_equal_p (XEXP (op0
, 0), op2
)
6662 && rtx_equal_p (XEXP (op0
, 1), op1
))))
6665 /* Convert a == b ? a : b into "b". */
6666 if (GET_CODE (op0
) == EQ
6667 && ! side_effects_p (op0
)
6668 && ! HONOR_NANS (mode
)
6669 && ! HONOR_SIGNED_ZEROS (mode
)
6670 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
6671 && rtx_equal_p (XEXP (op0
, 1), op2
))
6672 || (rtx_equal_p (XEXP (op0
, 0), op2
)
6673 && rtx_equal_p (XEXP (op0
, 1), op1
))))
6676 /* Convert (!c) != {0,...,0} ? a : b into
6677 c != {0,...,0} ? b : a for vector modes. */
6678 if (VECTOR_MODE_P (GET_MODE (op1
))
6679 && GET_CODE (op0
) == NE
6680 && GET_CODE (XEXP (op0
, 0)) == NOT
6681 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
6683 rtx cv
= XEXP (op0
, 1);
6686 if (!CONST_VECTOR_NUNITS (cv
).is_constant (&nunits
))
6689 for (int i
= 0; i
< nunits
; ++i
)
6690 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
6697 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
6698 XEXP (XEXP (op0
, 0), 0),
6700 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
6705 /* Convert x == 0 ? N : clz (x) into clz (x) when
6706 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6707 Similarly for ctz (x). */
6708 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
6709 && XEXP (op0
, 1) == const0_rtx
)
6712 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
6718 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
6720 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
6721 ? GET_MODE (XEXP (op0
, 1))
6722 : GET_MODE (XEXP (op0
, 0)));
6725 /* Look for happy constants in op1 and op2. */
6726 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
6728 HOST_WIDE_INT t
= INTVAL (op1
);
6729 HOST_WIDE_INT f
= INTVAL (op2
);
6731 if (t
== STORE_FLAG_VALUE
&& f
== 0)
6732 code
= GET_CODE (op0
);
6733 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
6736 tmp
= reversed_comparison_code (op0
, NULL
);
6744 return simplify_gen_relational (code
, mode
, cmp_mode
,
6745 XEXP (op0
, 0), XEXP (op0
, 1));
6748 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
6749 cmp_mode
, XEXP (op0
, 0),
6752 /* See if any simplifications were possible. */
6755 if (CONST_INT_P (temp
))
6756 return temp
== const0_rtx
? op2
: op1
;
6758 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
6764 gcc_assert (GET_MODE (op0
) == mode
);
6765 gcc_assert (GET_MODE (op1
) == mode
);
6766 gcc_assert (VECTOR_MODE_P (mode
));
6767 trueop2
= avoid_constant_pool_reference (op2
);
6768 if (CONST_INT_P (trueop2
)
6769 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
6771 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
6772 unsigned HOST_WIDE_INT mask
;
6773 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
6776 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
6778 if (!(sel
& mask
) && !side_effects_p (op0
))
6780 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
6783 rtx trueop0
= avoid_constant_pool_reference (op0
);
6784 rtx trueop1
= avoid_constant_pool_reference (op1
);
6785 if (GET_CODE (trueop0
) == CONST_VECTOR
6786 && GET_CODE (trueop1
) == CONST_VECTOR
)
6788 rtvec v
= rtvec_alloc (n_elts
);
6791 for (i
= 0; i
< n_elts
; i
++)
6792 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
6793 ? CONST_VECTOR_ELT (trueop0
, i
)
6794 : CONST_VECTOR_ELT (trueop1
, i
));
6795 return gen_rtx_CONST_VECTOR (mode
, v
);
6798 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6799 if no element from a appears in the result. */
6800 if (GET_CODE (op0
) == VEC_MERGE
)
6802 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
6803 if (CONST_INT_P (tem
))
6805 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
6806 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
6807 return simplify_gen_ternary (code
, mode
, mode
,
6808 XEXP (op0
, 1), op1
, op2
);
6809 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
6810 return simplify_gen_ternary (code
, mode
, mode
,
6811 XEXP (op0
, 0), op1
, op2
);
6814 if (GET_CODE (op1
) == VEC_MERGE
)
6816 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
6817 if (CONST_INT_P (tem
))
6819 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
6820 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
6821 return simplify_gen_ternary (code
, mode
, mode
,
6822 op0
, XEXP (op1
, 1), op2
);
6823 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
6824 return simplify_gen_ternary (code
, mode
, mode
,
6825 op0
, XEXP (op1
, 0), op2
);
6829 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6831 if (GET_CODE (op0
) == VEC_DUPLICATE
6832 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
6833 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
6834 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0
, 0))), 1))
6836 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
6837 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
6839 if (XEXP (XEXP (op0
, 0), 0) == op1
6840 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
6844 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6846 with (vec_concat (X) (B)) if N == 1 or
6847 (vec_concat (A) (X)) if N == 2. */
6848 if (GET_CODE (op0
) == VEC_DUPLICATE
6849 && GET_CODE (op1
) == CONST_VECTOR
6850 && known_eq (CONST_VECTOR_NUNITS (op1
), 2)
6851 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6852 && IN_RANGE (sel
, 1, 2))
6854 rtx newop0
= XEXP (op0
, 0);
6855 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
6857 std::swap (newop0
, newop1
);
6858 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6860 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6861 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6862 Only applies for vectors of two elements. */
6863 if (GET_CODE (op0
) == VEC_DUPLICATE
6864 && GET_CODE (op1
) == VEC_CONCAT
6865 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6866 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6867 && IN_RANGE (sel
, 1, 2))
6869 rtx newop0
= XEXP (op0
, 0);
6870 rtx newop1
= XEXP (op1
, 2 - sel
);
6871 rtx otherop
= XEXP (op1
, sel
- 1);
6873 std::swap (newop0
, newop1
);
6874 /* Don't want to throw away the other part of the vec_concat if
6875 it has side-effects. */
6876 if (!side_effects_p (otherop
))
6877 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6882 (vec_merge:outer (vec_duplicate:outer x:inner)
6883 (subreg:outer y:inner 0)
6886 with (vec_concat:outer x:inner y:inner) if N == 1,
6887 or (vec_concat:outer y:inner x:inner) if N == 2.
6889 Implicitly, this means we have a paradoxical subreg, but such
6890 a check is cheap, so make it anyway.
6892 Only applies for vectors of two elements. */
6893 if (GET_CODE (op0
) == VEC_DUPLICATE
6894 && GET_CODE (op1
) == SUBREG
6895 && GET_MODE (op1
) == GET_MODE (op0
)
6896 && GET_MODE (SUBREG_REG (op1
)) == GET_MODE (XEXP (op0
, 0))
6897 && paradoxical_subreg_p (op1
)
6898 && subreg_lowpart_p (op1
)
6899 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6900 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6901 && IN_RANGE (sel
, 1, 2))
6903 rtx newop0
= XEXP (op0
, 0);
6904 rtx newop1
= SUBREG_REG (op1
);
6906 std::swap (newop0
, newop1
);
6907 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6910 /* Same as above but with switched operands:
6911 Replace (vec_merge:outer (subreg:outer x:inner 0)
6912 (vec_duplicate:outer y:inner)
6915 with (vec_concat:outer x:inner y:inner) if N == 1,
6916 or (vec_concat:outer y:inner x:inner) if N == 2. */
6917 if (GET_CODE (op1
) == VEC_DUPLICATE
6918 && GET_CODE (op0
) == SUBREG
6919 && GET_MODE (op0
) == GET_MODE (op1
)
6920 && GET_MODE (SUBREG_REG (op0
)) == GET_MODE (XEXP (op1
, 0))
6921 && paradoxical_subreg_p (op0
)
6922 && subreg_lowpart_p (op0
)
6923 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6924 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6925 && IN_RANGE (sel
, 1, 2))
6927 rtx newop0
= SUBREG_REG (op0
);
6928 rtx newop1
= XEXP (op1
, 0);
6930 std::swap (newop0
, newop1
);
6931 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6934 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6936 with (vec_concat x y) or (vec_concat y x) depending on value
6938 if (GET_CODE (op0
) == VEC_DUPLICATE
6939 && GET_CODE (op1
) == VEC_DUPLICATE
6940 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6941 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6942 && IN_RANGE (sel
, 1, 2))
6944 rtx newop0
= XEXP (op0
, 0);
6945 rtx newop1
= XEXP (op1
, 0);
6947 std::swap (newop0
, newop1
);
6949 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6953 if (rtx_equal_p (op0
, op1
)
6954 && !side_effects_p (op2
) && !side_effects_p (op1
))
6957 if (!side_effects_p (op2
))
6960 = may_trap_p (op0
) ? NULL_RTX
: simplify_merge_mask (op0
, op2
, 0);
6962 = may_trap_p (op1
) ? NULL_RTX
: simplify_merge_mask (op1
, op2
, 1);
6964 return simplify_gen_ternary (code
, mode
, mode
,
6966 top1
? top1
: op1
, op2
);
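
/* Worked example (illustrative only, not part of GCC): for a constant
   selector, bit I of operand 2 chooses lane I from operand 0 when set and
   from operand 1 when clear, so with a selector of 5 (0b0101)

     (vec_merge:V4SI (const_vector:V4SI [a0 a1 a2 a3])
                     (const_vector:V4SI [b0 b1 b2 b3])
                     (const_int 5))

   folds to (const_vector:V4SI [a0 b1 a2 b3]) by the constant handling
   above.  */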
/* Try to calculate NUM_BYTES bytes of the target memory image of X,
   starting at byte FIRST_BYTE.  Return true on success and add the
   bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
   that the bytes follow target memory order.  Leave BYTES unmodified
   on failure.

   MODE is the mode of X.  The caller must reserve NUM_BYTES bytes in
   BYTES before calling this function.  */

bool
native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
                   unsigned int first_byte, unsigned int num_bytes)
{
6991 /* Check the mode is sensible. */
6992 gcc_assert (GET_MODE (x
) == VOIDmode
6993 ? is_a
<scalar_int_mode
> (mode
)
6994 : mode
== GET_MODE (x
));
6996 if (GET_CODE (x
) == CONST_VECTOR
)
6998 /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6999 is necessary. The only complication is that MODE_VECTOR_BOOL
7000 vectors can have several elements per byte. */
7001 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
7002 GET_MODE_NUNITS (mode
));
7003 unsigned int elt
= first_byte
* BITS_PER_UNIT
/ elt_bits
;
7004 if (elt_bits
< BITS_PER_UNIT
)
7006 /* This is the only case in which elements can be smaller than
7008 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_BOOL
);
7009 auto mask
= GET_MODE_MASK (GET_MODE_INNER (mode
));
7010 for (unsigned int i
= 0; i
< num_bytes
; ++i
)
7012 target_unit value
= 0;
7013 for (unsigned int j
= 0; j
< BITS_PER_UNIT
; j
+= elt_bits
)
7015 value
|= (INTVAL (CONST_VECTOR_ELT (x
, elt
)) & mask
) << j
;
7018 bytes
.quick_push (value
);
7023 unsigned int start
= bytes
.length ();
7024 unsigned int elt_bytes
= GET_MODE_UNIT_SIZE (mode
);
7025 /* Make FIRST_BYTE relative to ELT. */
7026 first_byte
%= elt_bytes
;
7027 while (num_bytes
> 0)
7029 /* Work out how many bytes we want from element ELT. */
7030 unsigned int chunk_bytes
= MIN (num_bytes
, elt_bytes
- first_byte
);
7031 if (!native_encode_rtx (GET_MODE_INNER (mode
),
7032 CONST_VECTOR_ELT (x
, elt
), bytes
,
7033 first_byte
, chunk_bytes
))
7035 bytes
.truncate (start
);
7040 num_bytes
-= chunk_bytes
;
7045 /* All subsequent cases are limited to scalars. */
7047 if (!is_a
<scalar_mode
> (mode
, &smode
))
7050 /* Make sure that the region is in range. */
7051 unsigned int end_byte
= first_byte
+ num_bytes
;
7052 unsigned int mode_bytes
= GET_MODE_SIZE (smode
);
7053 gcc_assert (end_byte
<= mode_bytes
);
7055 if (CONST_SCALAR_INT_P (x
))
7057 /* The target memory layout is affected by both BYTES_BIG_ENDIAN
7058 and WORDS_BIG_ENDIAN. Use the subreg machinery to get the lsb
7059 position of each byte. */
7060 rtx_mode_t
value (x
, smode
);
7061 wide_int_ref
value_wi (value
);
7062 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
7064 /* Always constant because the inputs are. */
7066 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
7067 /* Operate directly on the encoding rather than using
7068 wi::extract_uhwi, so that we preserve the sign or zero
7069 extension for modes that are not a whole number of bits in
7070 size. (Zero extension is only used for the combination of
7071 innermode == BImode && STORE_FLAG_VALUE == 1). */
7072 unsigned int elt
= lsb
/ HOST_BITS_PER_WIDE_INT
;
7073 unsigned int shift
= lsb
% HOST_BITS_PER_WIDE_INT
;
7074 unsigned HOST_WIDE_INT uhwi
= value_wi
.elt (elt
);
7075 bytes
.quick_push (uhwi
>> shift
);
7080 if (CONST_DOUBLE_P (x
))
7082 /* real_to_target produces an array of integers in target memory order.
7083 All integers before the last one have 32 bits; the last one may
7084 have 32 bits or fewer, depending on whether the mode bitsize
7085 is divisible by 32. Each of these integers is then laid out
7086 in target memory as any other integer would be. */
7087 long el32
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
7088 real_to_target (el32
, CONST_DOUBLE_REAL_VALUE (x
), smode
);
7090 /* The (maximum) number of target bytes per element of el32. */
7091 unsigned int bytes_per_el32
= 32 / BITS_PER_UNIT
;
7092 gcc_assert (bytes_per_el32
!= 0);
7094 /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
7096 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
7098 unsigned int index
= byte
/ bytes_per_el32
;
7099 unsigned int subbyte
= byte
% bytes_per_el32
;
7100 unsigned int int_bytes
= MIN (bytes_per_el32
,
7101 mode_bytes
- index
* bytes_per_el32
);
7102 /* Always constant because the inputs are. */
7104 = subreg_size_lsb (1, int_bytes
, subbyte
).to_constant ();
7105 bytes
.quick_push ((unsigned long) el32
[index
] >> lsb
);
7110 if (GET_CODE (x
) == CONST_FIXED
)
7112 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
7114 /* Always constant because the inputs are. */
7116 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
7117 unsigned HOST_WIDE_INT piece
= CONST_FIXED_VALUE_LOW (x
);
7118 if (lsb
>= HOST_BITS_PER_WIDE_INT
)
7120 lsb
-= HOST_BITS_PER_WIDE_INT
;
7121 piece
= CONST_FIXED_VALUE_HIGH (x
);
7123 bytes
.quick_push (piece
>> lsb
);
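
/* Worked example (illustrative only, not part of GCC): encoding the SImode
   constant 0x11223344 with FIRST_BYTE == 0 and NUM_BYTES == 4 pushes the
   bytes 0x44, 0x33, 0x22, 0x11 on a little-endian target and 0x11, 0x22,
   0x33, 0x44 on a big-endian one, since subreg_size_lsb gives each byte's
   lsb position in target memory order.  A minimal caller sketch, assuming
   a 4-byte SImode and mirroring the callers below; kept out of the build.  */
#if 0
static bool
sketch_encode_si (rtx x, vec<target_unit> &bytes)
{
  /* The caller must reserve NUM_BYTES in BYTES first, as documented.  */
  bytes.reserve (4);
  return native_encode_rtx (SImode, x, bytes, 0, 4);
}
#endif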
/* Read a vector of mode MODE from the target memory image given by BYTES,
   starting at byte FIRST_BYTE.  The vector is known to be encodable using
   NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
   and BYTES is known to have enough bytes to supply NPATTERNS *
   NELTS_PER_PATTERN vector elements.  Each element of BYTES contains
   BITS_PER_UNIT bits and the bytes are in target memory order.

   Return the vector on success, otherwise return NULL_RTX.  */

rtx
native_decode_vector_rtx (machine_mode mode, const vec<target_unit> &bytes,
                          unsigned int first_byte, unsigned int npatterns,
                          unsigned int nelts_per_pattern)
{
  rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);
7147 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
7148 GET_MODE_NUNITS (mode
));
7149 if (elt_bits
< BITS_PER_UNIT
)
7151 /* This is the only case in which elements can be smaller than a byte.
7152 Element 0 is always in the lsb of the containing byte. */
7153 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_BOOL
);
7154 for (unsigned int i
= 0; i
< builder
.encoded_nelts (); ++i
)
7156 unsigned int bit_index
= first_byte
* BITS_PER_UNIT
+ i
* elt_bits
;
7157 unsigned int byte_index
= bit_index
/ BITS_PER_UNIT
;
7158 unsigned int lsb
= bit_index
% BITS_PER_UNIT
;
7159 unsigned int value
= bytes
[byte_index
] >> lsb
;
7160 builder
.quick_push (gen_int_mode (value
, GET_MODE_INNER (mode
)));
7165 for (unsigned int i
= 0; i
< builder
.encoded_nelts (); ++i
)
7167 rtx x
= native_decode_rtx (GET_MODE_INNER (mode
), bytes
, first_byte
);
7170 builder
.quick_push (x
);
7171 first_byte
+= elt_bits
/ BITS_PER_UNIT
;
7174 return builder
.build ();
/* Read an rtx of mode MODE from the target memory image given by BYTES,
   starting at byte FIRST_BYTE.  Each element of BYTES contains BITS_PER_UNIT
   bits and the bytes are in target memory order.  The image has enough
   values to specify all bytes of MODE.

   Return the rtx on success, otherwise return NULL_RTX.  */

rtx
native_decode_rtx (machine_mode mode, const vec<target_unit> &bytes,
                   unsigned int first_byte)
{
7188 if (VECTOR_MODE_P (mode
))
7190 /* If we know at compile time how many elements there are,
7191 pull each element directly from BYTES. */
7193 if (GET_MODE_NUNITS (mode
).is_constant (&nelts
))
7194 return native_decode_vector_rtx (mode
, bytes
, first_byte
, nelts
, 1);
7198 scalar_int_mode imode
;
7199 if (is_a
<scalar_int_mode
> (mode
, &imode
)
7200 && GET_MODE_PRECISION (imode
) <= MAX_BITSIZE_MODE_ANY_INT
)
7202 /* Pull the bytes msb first, so that we can use simple
7203 shift-and-insert wide_int operations. */
7204 unsigned int size
= GET_MODE_SIZE (imode
);
7205 wide_int
result (wi::zero (GET_MODE_PRECISION (imode
)));
7206 for (unsigned int i
= 0; i
< size
; ++i
)
7208 unsigned int lsb
= (size
- i
- 1) * BITS_PER_UNIT
;
7209 /* Always constant because the inputs are. */
7210 unsigned int subbyte
7211 = subreg_size_offset_from_lsb (1, size
, lsb
).to_constant ();
7212 result
<<= BITS_PER_UNIT
;
7213 result
|= bytes
[first_byte
+ subbyte
];
7215 return immed_wide_int_const (result
, imode
);
7218 scalar_float_mode fmode
;
7219 if (is_a
<scalar_float_mode
> (mode
, &fmode
))
7221 /* We need to build an array of integers in target memory order.
7222 All integers before the last one have 32 bits; the last one may
7223 have 32 bits or fewer, depending on whether the mode bitsize
7224 is divisible by 32. */
7225 long el32
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
7226 unsigned int num_el32
= CEIL (GET_MODE_BITSIZE (fmode
), 32);
7227 memset (el32
, 0, num_el32
* sizeof (long));
7229 /* The (maximum) number of target bytes per element of el32. */
7230 unsigned int bytes_per_el32
= 32 / BITS_PER_UNIT
;
7231 gcc_assert (bytes_per_el32
!= 0);
7233 unsigned int mode_bytes
= GET_MODE_SIZE (fmode
);
7234 for (unsigned int byte
= 0; byte
< mode_bytes
; ++byte
)
7236 unsigned int index
= byte
/ bytes_per_el32
;
7237 unsigned int subbyte
= byte
% bytes_per_el32
;
7238 unsigned int int_bytes
= MIN (bytes_per_el32
,
7239 mode_bytes
- index
* bytes_per_el32
);
7240 /* Always constant because the inputs are. */
7242 = subreg_size_lsb (1, int_bytes
, subbyte
).to_constant ();
7243 el32
[index
] |= (unsigned long) bytes
[first_byte
+ byte
] << lsb
;
7246 real_from_target (&r
, el32
, fmode
);
7247 return const_double_from_real_value (r
, fmode
);
7250 if (ALL_SCALAR_FIXED_POINT_MODE_P (mode
))
7252 scalar_mode smode
= as_a
<scalar_mode
> (mode
);
7258 unsigned int mode_bytes
= GET_MODE_SIZE (smode
);
7259 for (unsigned int byte
= 0; byte
< mode_bytes
; ++byte
)
7261 /* Always constant because the inputs are. */
7263 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
7264 unsigned HOST_WIDE_INT unit
= bytes
[first_byte
+ byte
];
7265 if (lsb
>= HOST_BITS_PER_WIDE_INT
)
7266 f
.data
.high
|= unit
<< (lsb
- HOST_BITS_PER_WIDE_INT
);
7268 f
.data
.low
|= unit
<< lsb
;
7270 return CONST_FIXED_FROM_FIXED_VALUE (f
, mode
);
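
/* Illustrative note (not part of GCC): native_decode_rtx is the inverse of
   native_encode_rtx, so decoding the little-endian byte image
   { 0x44, 0x33, 0x22, 0x11 } in SImode recovers (const_int 0x11223344).
   simplify_immed_subreg below relies on exactly this round trip to
   reinterpret a constant in a different mode.  */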
/* Simplify a byte offset BYTE into CONST_VECTOR X.  The main purpose
   is to convert a runtime BYTE value into a constant one.  */

static poly_uint64
simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
{
7282 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7283 machine_mode mode
= GET_MODE (x
);
7284 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
7285 GET_MODE_NUNITS (mode
));
7286 /* The number of bits needed to encode one element from each pattern. */
7287 unsigned int sequence_bits
= CONST_VECTOR_NPATTERNS (x
) * elt_bits
;
7289 /* Identify the start point in terms of a sequence number and a byte offset
7290 within that sequence. */
7291 poly_uint64 first_sequence
;
7292 unsigned HOST_WIDE_INT subbit
;
7293 if (can_div_trunc_p (byte
* BITS_PER_UNIT
, sequence_bits
,
7294 &first_sequence
, &subbit
))
7296 unsigned int nelts_per_pattern
= CONST_VECTOR_NELTS_PER_PATTERN (x
);
7297 if (nelts_per_pattern
== 1)
7298 /* This is a duplicated vector, so the value of FIRST_SEQUENCE
7300 byte
= subbit
/ BITS_PER_UNIT
;
7301 else if (nelts_per_pattern
== 2 && known_gt (first_sequence
, 0U))
7303 /* The subreg drops the first element from each pattern and
7304 only uses the second element. Find the first sequence
7305 that starts on a byte boundary. */
7306 subbit
+= least_common_multiple (sequence_bits
, BITS_PER_UNIT
);
7307 byte
= subbit
/ BITS_PER_UNIT
;
/* Subroutine of simplify_subreg in which:

   - X is known to be a CONST_VECTOR
   - OUTERMODE is known to be a vector mode

   Try to handle the subreg by operating on the CONST_VECTOR encoding
   rather than on each individual element of the CONST_VECTOR.

   Return the simplified subreg on success, otherwise return NULL_RTX.  */

static rtx
simplify_const_vector_subreg (machine_mode outermode, rtx x,
                              machine_mode innermode, unsigned int first_byte)
{
7327 /* Paradoxical subregs of vectors have dubious semantics. */
7328 if (paradoxical_subreg_p (outermode
, innermode
))
7331 /* We can only preserve the semantics of a stepped pattern if the new
7332 vector element is the same as the original one. */
7333 if (CONST_VECTOR_STEPPED_P (x
)
7334 && GET_MODE_INNER (outermode
) != GET_MODE_INNER (innermode
))
7337 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7338 unsigned int x_elt_bits
7339 = vector_element_size (GET_MODE_BITSIZE (innermode
),
7340 GET_MODE_NUNITS (innermode
));
7341 unsigned int out_elt_bits
7342 = vector_element_size (GET_MODE_BITSIZE (outermode
),
7343 GET_MODE_NUNITS (outermode
));
7345 /* The number of bits needed to encode one element from every pattern
7346 of the original vector. */
7347 unsigned int x_sequence_bits
= CONST_VECTOR_NPATTERNS (x
) * x_elt_bits
;
7349 /* The number of bits needed to encode one element from every pattern
7351 unsigned int out_sequence_bits
7352 = least_common_multiple (x_sequence_bits
, out_elt_bits
);
7354 /* Work out the number of interleaved patterns in the output vector
7355 and the number of encoded elements per pattern. */
7356 unsigned int out_npatterns
= out_sequence_bits
/ out_elt_bits
;
7357 unsigned int nelts_per_pattern
= CONST_VECTOR_NELTS_PER_PATTERN (x
);
7359 /* The encoding scheme requires the number of elements to be a multiple
7360 of the number of patterns, so that each pattern appears at least once
7361 and so that the same number of elements appear from each pattern. */
7362 bool ok_p
= multiple_p (GET_MODE_NUNITS (outermode
), out_npatterns
);
7363 unsigned int const_nunits
;
7364 if (GET_MODE_NUNITS (outermode
).is_constant (&const_nunits
)
7365 && (!ok_p
|| out_npatterns
* nelts_per_pattern
> const_nunits
))
7367 /* Either the encoding is invalid, or applying it would give us
7368 more elements than we need. Just encode each element directly. */
7369 out_npatterns
= const_nunits
;
7370 nelts_per_pattern
= 1;
7375 /* Get enough bytes of X to form the new encoding. */
7376 unsigned int buffer_bits
= out_npatterns
* nelts_per_pattern
* out_elt_bits
;
7377 unsigned int buffer_bytes
= CEIL (buffer_bits
, BITS_PER_UNIT
);
7378 auto_vec
<target_unit
, 128> buffer (buffer_bytes
);
7379 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, buffer_bytes
))
7382 /* Reencode the bytes as OUTERMODE. */
7383 return native_decode_vector_rtx (outermode
, buffer
, 0, out_npatterns
,
/* Try to simplify a subreg of a constant by encoding the subreg region
   as a sequence of target bytes and reading them back in the new mode.
   Return the new value on success, otherwise return null.

   The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
   and byte offset FIRST_BYTE.  */

static rtx
simplify_immed_subreg (fixed_size_mode outermode, rtx x,
                       machine_mode innermode, unsigned int first_byte)
{
7398 unsigned int buffer_bytes
= GET_MODE_SIZE (outermode
);
7399 auto_vec
<target_unit
, 128> buffer (buffer_bytes
);
7401 /* Some ports misuse CCmode. */
7402 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (x
))
7405 /* Paradoxical subregs read undefined values for bytes outside of the
7406 inner value. However, we have traditionally always sign-extended
7407 integer constants and zero-extended others. */
7408 unsigned int inner_bytes
= buffer_bytes
;
7409 if (paradoxical_subreg_p (outermode
, innermode
))
7411 if (!GET_MODE_SIZE (innermode
).is_constant (&inner_bytes
))
7414 target_unit filler
= 0;
7415 if (CONST_SCALAR_INT_P (x
) && wi::neg_p (rtx_mode_t (x
, innermode
)))
7418 /* Add any leading bytes due to big-endian layout. The number of
7419 bytes must be constant because both modes have constant size. */
7420 unsigned int leading_bytes
7421 = -byte_lowpart_offset (outermode
, innermode
).to_constant ();
7422 for (unsigned int i
= 0; i
< leading_bytes
; ++i
)
7423 buffer
.quick_push (filler
);
7425 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, inner_bytes
))
7428 /* Add any trailing bytes due to little-endian layout. */
7429 while (buffer
.length () < buffer_bytes
)
7430 buffer
.quick_push (filler
);
7432 else if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, inner_bytes
))
7434 rtx ret
= native_decode_rtx (outermode
, buffer
, 0);
7435 if (ret
&& FLOAT_MODE_P (outermode
))
7437 auto_vec
<target_unit
, 128> buffer2 (buffer_bytes
);
7438 if (!native_encode_rtx (outermode
, ret
, buffer2
, 0, buffer_bytes
))
7440 for (unsigned int i
= 0; i
< buffer_bytes
; ++i
)
7441 if (buffer
[i
] != buffer2
[i
])
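
/* Worked example (illustrative only, not part of GCC): taking an HImode
   subreg of (const_int 0x11223344) in SImode at FIRST_BYTE == 0 encodes
   the constant into target bytes and decodes the first two of them, giving
   (const_int 0x3344) on a little-endian target and (const_int 0x1122) on a
   big-endian one.  */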
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_context::simplify_subreg (machine_mode outermode, rtx op,
                                   machine_mode innermode, poly_uint64 byte)
{
7453 /* Little bit of sanity checking. */
7454 gcc_assert (innermode
!= VOIDmode
);
7455 gcc_assert (outermode
!= VOIDmode
);
7456 gcc_assert (innermode
!= BLKmode
);
7457 gcc_assert (outermode
!= BLKmode
);
7459 gcc_assert (GET_MODE (op
) == innermode
7460 || GET_MODE (op
) == VOIDmode
);
7462 poly_uint64 outersize
= GET_MODE_SIZE (outermode
);
7463 if (!multiple_p (byte
, outersize
))
7466 poly_uint64 innersize
= GET_MODE_SIZE (innermode
);
7467 if (maybe_ge (byte
, innersize
))
7470 if (outermode
== innermode
&& known_eq (byte
, 0U))
7473 if (GET_CODE (op
) == CONST_VECTOR
)
7474 byte
= simplify_const_vector_byte_offset (op
, byte
);
7476 if (multiple_p (byte
, GET_MODE_UNIT_SIZE (innermode
)))
7480 if (VECTOR_MODE_P (outermode
)
7481 && GET_MODE_INNER (outermode
) == GET_MODE_INNER (innermode
)
7482 && vec_duplicate_p (op
, &elt
))
7483 return gen_vec_duplicate (outermode
, elt
);
7485 if (outermode
== GET_MODE_INNER (innermode
)
7486 && vec_duplicate_p (op
, &elt
))
7490 if (CONST_SCALAR_INT_P (op
)
7491 || CONST_DOUBLE_AS_FLOAT_P (op
)
7492 || CONST_FIXED_P (op
)
7493 || GET_CODE (op
) == CONST_VECTOR
)
7495 unsigned HOST_WIDE_INT cbyte
;
7496 if (byte
.is_constant (&cbyte
))
7498 if (GET_CODE (op
) == CONST_VECTOR
&& VECTOR_MODE_P (outermode
))
7500 rtx tmp
= simplify_const_vector_subreg (outermode
, op
,
7506 fixed_size_mode fs_outermode
;
7507 if (is_a
<fixed_size_mode
> (outermode
, &fs_outermode
))
7508 return simplify_immed_subreg (fs_outermode
, op
, innermode
, cbyte
);
7512 /* Changing mode twice with SUBREG => just change it once,
7513 or not at all if changing back op starting mode. */
7514 if (GET_CODE (op
) == SUBREG
)
7516 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
7517 poly_uint64 innermostsize
= GET_MODE_SIZE (innermostmode
);
7520 if (outermode
== innermostmode
7521 && known_eq (byte
, 0U)
7522 && known_eq (SUBREG_BYTE (op
), 0))
7523 return SUBREG_REG (op
);
7525 /* Work out the memory offset of the final OUTERMODE value relative
7526 to the inner value of OP. */
7527 poly_int64 mem_offset
= subreg_memory_offset (outermode
,
7529 poly_int64 op_mem_offset
= subreg_memory_offset (op
);
7530 poly_int64 final_offset
= mem_offset
+ op_mem_offset
;
7532 /* See whether resulting subreg will be paradoxical. */
7533 if (!paradoxical_subreg_p (outermode
, innermostmode
))
7535 /* Bail out in case resulting subreg would be incorrect. */
7536 if (maybe_lt (final_offset
, 0)
7537 || maybe_ge (poly_uint64 (final_offset
), innermostsize
)
7538 || !multiple_p (final_offset
, outersize
))
7543 poly_int64 required_offset
= subreg_memory_offset (outermode
,
7545 if (maybe_ne (final_offset
, required_offset
))
7547 /* Paradoxical subregs always have byte offset 0. */
7551 /* Recurse for further possible simplifications. */
7552 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
7556 if (validate_subreg (outermode
, innermostmode
,
7557 SUBREG_REG (op
), final_offset
))
7559 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
7560 if (SUBREG_PROMOTED_VAR_P (op
)
7561 && SUBREG_PROMOTED_SIGN (op
) >= 0
7562 && GET_MODE_CLASS (outermode
) == MODE_INT
7563 && known_ge (outersize
, innersize
)
7564 && known_le (outersize
, innermostsize
)
7565 && subreg_lowpart_p (newx
))
7567 SUBREG_PROMOTED_VAR_P (newx
) = 1;
7568 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
7575 /* SUBREG of a hard register => just change the register number
7576 and/or mode. If the hard register is not valid in that mode,
7577 suppress this simplification. If the hard register is the stack,
7578 frame, or argument pointer, leave this as a SUBREG. */
7580 if (REG_P (op
) && HARD_REGISTER_P (op
))
7582 unsigned int regno
, final_regno
;
7585 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
7586 if (HARD_REGISTER_NUM_P (final_regno
))
7588 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
,
7589 subreg_memory_offset (outermode
,
7592 /* Propagate original regno. We don't have any way to specify
7593 the offset inside original regno, so do so only for lowpart.
7594 The information is used only by alias analysis that cannot
7595 grog partial register anyway. */
7597 if (known_eq (subreg_lowpart_offset (outermode
, innermode
), byte
))
7598 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
7603 /* If we have a SUBREG of a register that we are replacing and we are
7604 replacing it with a MEM, make a new MEM and try replacing the
7605 SUBREG with it. Don't do this if the MEM has a mode-dependent address
7606 or if we would be widening it. */
7609 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
7610 /* Allow splitting of volatile memory references in case we don't
7611 have instruction to move the whole thing. */
7612 && (! MEM_VOLATILE_P (op
)
7613 || ! have_insn_for (SET
, innermode
))
7614 && !(STRICT_ALIGNMENT
&& MEM_ALIGN (op
) < GET_MODE_ALIGNMENT (outermode
))
7615 && known_le (outersize
, innersize
))
7616 return adjust_address_nv (op
, outermode
, byte
);
7618 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
7620 if (GET_CODE (op
) == CONCAT
7621 || GET_CODE (op
) == VEC_CONCAT
)
7623 poly_uint64 final_offset
;
7626 machine_mode part_mode
= GET_MODE (XEXP (op
, 0));
7627 if (part_mode
== VOIDmode
)
7628 part_mode
= GET_MODE_INNER (GET_MODE (op
));
7629 poly_uint64 part_size
= GET_MODE_SIZE (part_mode
);
7630 if (known_lt (byte
, part_size
))
7632 part
= XEXP (op
, 0);
7633 final_offset
= byte
;
7635 else if (known_ge (byte
, part_size
))
7637 part
= XEXP (op
, 1);
7638 final_offset
= byte
- part_size
;
7643 if (maybe_gt (final_offset
+ outersize
, part_size
))
7646 part_mode
= GET_MODE (part
);
7647 if (part_mode
== VOIDmode
)
7648 part_mode
= GET_MODE_INNER (GET_MODE (op
));
7649 res
= simplify_subreg (outermode
, part
, part_mode
, final_offset
);
7652 if (validate_subreg (outermode
, part_mode
, part
, final_offset
))
7653 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
7658 (subreg (vec_merge (X)
7660 (const_int ((1 << N) | M)))
7661 (N * sizeof (outermode)))
7663 (subreg (X) (N * sizeof (outermode)))
7666 if (constant_multiple_p (byte
, GET_MODE_SIZE (outermode
), &idx
)
7667 && idx
< HOST_BITS_PER_WIDE_INT
7668 && GET_CODE (op
) == VEC_MERGE
7669 && GET_MODE_INNER (innermode
) == outermode
7670 && CONST_INT_P (XEXP (op
, 2))
7671 && (UINTVAL (XEXP (op
, 2)) & (HOST_WIDE_INT_1U
<< idx
)) != 0)
7672 return simplify_gen_subreg (outermode
, XEXP (op
, 0), innermode
, byte
);
7674 /* A SUBREG resulting from a zero extension may fold to zero if
7675 it extracts higher bits that the ZERO_EXTEND's source bits. */
7676 if (GET_CODE (op
) == ZERO_EXTEND
&& SCALAR_INT_MODE_P (innermode
))
7678 poly_uint64 bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
7679 if (known_ge (bitpos
, GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))))
7680 return CONST0_RTX (outermode
);
7683 scalar_int_mode int_outermode
, int_innermode
;
7684 if (is_a
<scalar_int_mode
> (outermode
, &int_outermode
)
7685 && is_a
<scalar_int_mode
> (innermode
, &int_innermode
)
7686 && known_eq (byte
, subreg_lowpart_offset (int_outermode
, int_innermode
)))
7688 /* Handle polynomial integers. The upper bits of a paradoxical
7689 subreg are undefined, so this is safe regardless of whether
7690 we're truncating or extending. */
7691 if (CONST_POLY_INT_P (op
))
7694 = poly_wide_int::from (const_poly_int_value (op
),
7695 GET_MODE_PRECISION (int_outermode
),
7697 return immed_wide_int_const (val
, int_outermode
);
7700 if (GET_MODE_PRECISION (int_outermode
)
7701 < GET_MODE_PRECISION (int_innermode
))
7703 rtx tem
= simplify_truncation (int_outermode
, op
, int_innermode
);
7709 /* If the outer mode is not integral, try taking a subreg with the equivalent
7710 integer outer mode and then bitcasting the result.
7711 Other simplifications rely on integer to integer subregs and we'd
7712 potentially miss out on optimizations otherwise. */
7713 if (known_gt (GET_MODE_SIZE (innermode
),
7714 GET_MODE_SIZE (outermode
))
7715 && SCALAR_INT_MODE_P (innermode
)
7716 && !SCALAR_INT_MODE_P (outermode
)
7717 && int_mode_for_size (GET_MODE_BITSIZE (outermode
),
7718 0).exists (&int_outermode
))
7720 rtx tem
= simplify_subreg (int_outermode
, op
, innermode
, byte
);
7722 return simplify_gen_subreg (outermode
, tem
, int_outermode
, byte
);
7725 /* If OP is a vector comparison and the subreg is not changing the
7726 number of elements or the size of the elements, change the result
7727 of the comparison to the new mode. */
7728 if (COMPARISON_P (op
)
7729 && VECTOR_MODE_P (outermode
)
7730 && VECTOR_MODE_P (innermode
)
7731 && known_eq (GET_MODE_NUNITS (outermode
), GET_MODE_NUNITS (innermode
))
7732 && known_eq (GET_MODE_UNIT_SIZE (outermode
),
7733 GET_MODE_UNIT_SIZE (innermode
)))
7734 return simplify_gen_relational (GET_CODE (op
), outermode
, innermode
,
7735 XEXP (op
, 0), XEXP (op
, 1));
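
/* Usage sketch (illustrative only, not part of GCC): take the low QImode
   part of an HImode constant.  The lowpart byte offset is computed for the
   target, so the result is (const_int 0x34) on either endianness.  Kept out
   of the build.  */
#if 0
static rtx
sketch_subreg_lowpart_qi (void)
{
  rtx x = gen_int_mode (0x1234, HImode);
  return simplify_subreg (QImode, x, HImode,
                          subreg_lowpart_offset (QImode, HImode));
}
#endif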
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
                                       machine_mode innermode,
                                       poly_uint64 byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (MODE_COMPOSITE_P (outermode)
      && (CONST_SCALAR_INT_P (op)
          || CONST_DOUBLE_AS_FLOAT_P (op)
          || CONST_FIXED_P (op)
          || GET_CODE (op) == CONST_VECTOR))
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
                                  machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
                              subreg_lowpart_offset (outer_mode, inner_mode));
}
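
/* Worked example (illustrative only, not part of GCC):
   lowpart_subreg (QImode, gen_int_mode (0x1234, HImode), HImode) folds to
   (const_int 0x34), while for a pseudo it produces (subreg:QI (reg:HI n) B)
   with B the endian-dependent lowpart byte offset.  */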
/* Generate RTX to select element at INDEX out of vector OP.  */

rtx
simplify_context::simplify_gen_vec_select (rtx op, unsigned int index)
{
  gcc_assert (VECTOR_MODE_P (GET_MODE (op)));

  scalar_mode imode = GET_MODE_INNER (GET_MODE (op));

  if (known_eq (index * GET_MODE_SIZE (imode),
                subreg_lowpart_offset (imode, GET_MODE (op))))
    {
      rtx res = lowpart_subreg (imode, op, GET_MODE (op));
      if (res)
        return res;
    }

  rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (index)));
  return gen_rtx_VEC_SELECT (imode, op, tmp);
}
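
/* Worked example (illustrative only, not part of GCC): for (reg:V4SI x),
   selecting the element that happens to be the lowpart (index 0 on a
   little-endian target) yields (subreg:SI (reg:V4SI x) 0); any other index
   yields (vec_select:SI (reg:V4SI x) (parallel [(const_int INDEX)])).  */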
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.cc.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.cc.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.cc & combine.cc that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */
    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }

  return NULL;
}
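
/* Usage sketch (illustrative only, not part of GCC): feeding simplify_rtx
   an expression it can fold returns the folded rtx; otherwise it returns
   NULL and the caller keeps the original.  Kept out of the build.  */
#if 0
static rtx
sketch_simplify_plus_zero (rtx reg)
{
  /* (plus:SI (reg:SI r) (const_int 0)) simplifies to the register.  */
  rtx x = gen_rtx_PLUS (SImode, reg, const0_rtx);
  return simplify_rtx (x);
}
#endif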
namespace selftest {

/* Make a unique pseudo REG of mode MODE for use by selftests.  */

static rtx
make_test_reg (machine_mode mode)
{
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}
7917 test_scalar_int_ops (machine_mode mode
)
7919 rtx op0
= make_test_reg (mode
);
7920 rtx op1
= make_test_reg (mode
);
7921 rtx six
= GEN_INT (6);
7923 rtx neg_op0
= simplify_gen_unary (NEG
, mode
, op0
, mode
);
7924 rtx not_op0
= simplify_gen_unary (NOT
, mode
, op0
, mode
);
7925 rtx bswap_op0
= simplify_gen_unary (BSWAP
, mode
, op0
, mode
);
7927 rtx and_op0_op1
= simplify_gen_binary (AND
, mode
, op0
, op1
);
7928 rtx ior_op0_op1
= simplify_gen_binary (IOR
, mode
, op0
, op1
);
7929 rtx xor_op0_op1
= simplify_gen_binary (XOR
, mode
, op0
, op1
);
7931 rtx and_op0_6
= simplify_gen_binary (AND
, mode
, op0
, six
);
7932 rtx and_op1_6
= simplify_gen_binary (AND
, mode
, op1
, six
);
7934 /* Test some binary identities. */
7935 ASSERT_RTX_EQ (op0
, simplify_gen_binary (PLUS
, mode
, op0
, const0_rtx
));
7936 ASSERT_RTX_EQ (op0
, simplify_gen_binary (PLUS
, mode
, const0_rtx
, op0
));
7937 ASSERT_RTX_EQ (op0
, simplify_gen_binary (MINUS
, mode
, op0
, const0_rtx
));
7938 ASSERT_RTX_EQ (op0
, simplify_gen_binary (MULT
, mode
, op0
, const1_rtx
));
7939 ASSERT_RTX_EQ (op0
, simplify_gen_binary (MULT
, mode
, const1_rtx
, op0
));
7940 ASSERT_RTX_EQ (op0
, simplify_gen_binary (DIV
, mode
, op0
, const1_rtx
));
7941 ASSERT_RTX_EQ (op0
, simplify_gen_binary (AND
, mode
, op0
, constm1_rtx
));
7942 ASSERT_RTX_EQ (op0
, simplify_gen_binary (AND
, mode
, constm1_rtx
, op0
));
7943 ASSERT_RTX_EQ (op0
, simplify_gen_binary (IOR
, mode
, op0
, const0_rtx
));
7944 ASSERT_RTX_EQ (op0
, simplify_gen_binary (IOR
, mode
, const0_rtx
, op0
));
7945 ASSERT_RTX_EQ (op0
, simplify_gen_binary (XOR
, mode
, op0
, const0_rtx
));
7946 ASSERT_RTX_EQ (op0
, simplify_gen_binary (XOR
, mode
, const0_rtx
, op0
));
7947 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ASHIFT
, mode
, op0
, const0_rtx
));
7948 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ROTATE
, mode
, op0
, const0_rtx
));
7949 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ASHIFTRT
, mode
, op0
, const0_rtx
));
7950 ASSERT_RTX_EQ (op0
, simplify_gen_binary (LSHIFTRT
, mode
, op0
, const0_rtx
));
7951 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ROTATERT
, mode
, op0
, const0_rtx
));
7953 /* Test some self-inverse operations. */
7954 ASSERT_RTX_EQ (op0
, simplify_gen_unary (NEG
, mode
, neg_op0
, mode
));
7955 ASSERT_RTX_EQ (op0
, simplify_gen_unary (NOT
, mode
, not_op0
, mode
));
7956 ASSERT_RTX_EQ (op0
, simplify_gen_unary (BSWAP
, mode
, bswap_op0
, mode
));
7958 /* Test some reflexive operations. */
7959 ASSERT_RTX_EQ (op0
, simplify_gen_binary (AND
, mode
, op0
, op0
));
7960 ASSERT_RTX_EQ (op0
, simplify_gen_binary (IOR
, mode
, op0
, op0
));
7961 ASSERT_RTX_EQ (op0
, simplify_gen_binary (SMIN
, mode
, op0
, op0
));
7962 ASSERT_RTX_EQ (op0
, simplify_gen_binary (SMAX
, mode
, op0
, op0
));
7963 ASSERT_RTX_EQ (op0
, simplify_gen_binary (UMIN
, mode
, op0
, op0
));
7964 ASSERT_RTX_EQ (op0
, simplify_gen_binary (UMAX
, mode
, op0
, op0
));
7966 ASSERT_RTX_EQ (const0_rtx
, simplify_gen_binary (MINUS
, mode
, op0
, op0
));
7967 ASSERT_RTX_EQ (const0_rtx
, simplify_gen_binary (XOR
, mode
, op0
, op0
));
7969 /* Test simplify_distributive_operation. */
7970 ASSERT_RTX_EQ (simplify_gen_binary (AND
, mode
, xor_op0_op1
, six
),
7971 simplify_gen_binary (XOR
, mode
, and_op0_6
, and_op1_6
));
7972 ASSERT_RTX_EQ (simplify_gen_binary (AND
, mode
, ior_op0_op1
, six
),
7973 simplify_gen_binary (IOR
, mode
, and_op0_6
, and_op1_6
));
7974 ASSERT_RTX_EQ (simplify_gen_binary (AND
, mode
, and_op0_op1
, six
),
7975 simplify_gen_binary (AND
, mode
, and_op0_6
, and_op1_6
));
7977 /* Test useless extensions are eliminated. */
7978 ASSERT_RTX_EQ (op0
, simplify_gen_unary (TRUNCATE
, mode
, op0
, mode
));
7979 ASSERT_RTX_EQ (op0
, simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, mode
));
7980 ASSERT_RTX_EQ (op0
, simplify_gen_unary (SIGN_EXTEND
, mode
, op0
, mode
));
7981 ASSERT_RTX_EQ (op0
, lowpart_subreg (mode
, op0
, mode
));
7984 /* Verify some simplifications of integer extension/truncation.
7985 Machine mode BMODE is the guaranteed wider than SMODE. */
7988 test_scalar_int_ext_ops (machine_mode bmode
, machine_mode smode
)
7990 rtx sreg
= make_test_reg (smode
);
7992 /* Check truncation of extension. */
7993 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7994 simplify_gen_unary (ZERO_EXTEND
, bmode
,
7998 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7999 simplify_gen_unary (SIGN_EXTEND
, bmode
,
8003 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
8004 lowpart_subreg (bmode
, sreg
, smode
),
8009 /* Verify more simplifications of integer extension/truncation.
8010 BMODE is wider than MMODE which is wider than SMODE. */
8013 test_scalar_int_ext_ops2 (machine_mode bmode
, machine_mode mmode
,
8016 rtx breg
= make_test_reg (bmode
);
8017 rtx mreg
= make_test_reg (mmode
);
8018 rtx sreg
= make_test_reg (smode
);
8020 /* Check truncate of truncate. */
8021 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
8022 simplify_gen_unary (TRUNCATE
, mmode
,
8025 simplify_gen_unary (TRUNCATE
, smode
, breg
, bmode
));
8027 /* Check extension of extension. */
8028 ASSERT_RTX_EQ (simplify_gen_unary (ZERO_EXTEND
, bmode
,
8029 simplify_gen_unary (ZERO_EXTEND
, mmode
,
8032 simplify_gen_unary (ZERO_EXTEND
, bmode
, sreg
, smode
));
8033 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND
, bmode
,
8034 simplify_gen_unary (SIGN_EXTEND
, mmode
,
8037 simplify_gen_unary (SIGN_EXTEND
, bmode
, sreg
, smode
));
8038 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND
, bmode
,
8039 simplify_gen_unary (ZERO_EXTEND
, mmode
,
8042 simplify_gen_unary (ZERO_EXTEND
, bmode
, sreg
, smode
));
8044 /* Check truncation of extension. */
8045 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
8046 simplify_gen_unary (ZERO_EXTEND
, bmode
,
8049 simplify_gen_unary (TRUNCATE
, smode
, mreg
, mmode
));
8050 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
8051 simplify_gen_unary (SIGN_EXTEND
, bmode
,
8054 simplify_gen_unary (TRUNCATE
, smode
, mreg
, mmode
));
8055 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
8056 lowpart_subreg (bmode
, mreg
, mmode
),
8058 simplify_gen_unary (TRUNCATE
, smode
, mreg
, mmode
));
/* Verify some simplifications involving scalar expressions.  */

static void
test_scalar_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (SCALAR_INT_MODE_P (mode) && mode != BImode)
        test_scalar_int_ops (mode);
    }

  test_scalar_int_ext_ops (HImode, QImode);
  test_scalar_int_ext_ops (SImode, QImode);
  test_scalar_int_ext_ops (SImode, HImode);
  test_scalar_int_ext_ops (DImode, QImode);
  test_scalar_int_ext_ops (DImode, HImode);
  test_scalar_int_ext_ops (DImode, SImode);

  test_scalar_int_ext_ops2 (SImode, HImode, QImode);
  test_scalar_int_ext_ops2 (DImode, HImode, QImode);
  test_scalar_int_ext_ops2 (DImode, SImode, QImode);
  test_scalar_int_ext_ops2 (DImode, SImode, HImode);
}
/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */
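/* Note: (vec_duplicate:MODE X) is a vector of mode MODE in which every
   element is equal to the scalar X.  */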

static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  poly_uint64 nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
                     simplify_unary_operation (NOT, mode,
                                               duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
                     simplify_unary_operation (NEG, mode,
                                               duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
                     simplify_binary_operation (PLUS, mode, duplicate,
                                                CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
                     simplify_binary_operation (MINUS, mode, duplicate,
                                                CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
                         simplify_binary_operation (MINUS, mode, duplicate,
                                                    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
                     simplify_binary_operation (VEC_SELECT, inner_mode,
                                                duplicate, zero_par));

  unsigned HOST_WIDE_INT const_nunits;
  if (nunits.is_constant (&const_nunits))
    {
      /* And again with the final element.  */
      rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
      rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
      ASSERT_RTX_PTR_EQ (scalar_reg,
                         simplify_binary_operation (VEC_SELECT, inner_mode,
                                                    duplicate, last_par));

      /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE.  */
      /* Skip this test for vectors of booleans, because offset is in bytes,
         while vec_merge indices are in elements (usually bits).  */
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_BOOL)
        {
          rtx vector_reg = make_test_reg (mode);
          for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
            {
              if (i >= HOST_BITS_PER_WIDE_INT)
                break;
              rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
              rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
              poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
              ASSERT_RTX_EQ (scalar_reg,
                             simplify_gen_subreg (inner_mode, vm,
                                                  mode, offset));
            }
        }
    }

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
                 simplify_gen_subreg (inner_mode, duplicate,
                                      mode, offset));

  machine_mode narrower_mode;
  if (maybe_ne (nunits, 2U)
      && multiple_p (nunits, 2)
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_DUPLICATE of a vector.  */
      rtx_vector_builder nbuilder (narrower_mode, 2, 1);
      nbuilder.quick_push (const0_rtx);
      nbuilder.quick_push (const1_rtx);
      rtx_vector_builder builder (mode, 2, 1);
      builder.quick_push (const0_rtx);
      builder.quick_push (const1_rtx);
      ASSERT_RTX_EQ (builder.build (),
                     simplify_unary_operation (VEC_DUPLICATE, mode,
                                               nbuilder.build (),
                                               narrower_mode));

      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
        = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
        = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
                     simplify_binary_operation (VEC_SELECT, narrower_mode,
                                                duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
                     simplify_gen_subreg (narrower_mode, duplicate,
                                          mode, offset));
    }
}
/* Test vector simplifications involving VEC_SERIES in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */
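/* Note: (vec_series:MODE BASE STEP) represents the vector
   { BASE, BASE + STEP, BASE + 2 * STEP, ... }.  */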

static void
test_vector_ops_series (machine_mode mode, rtx scalar_reg)
{
  /* Test unary cases with VEC_SERIES arguments.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
  rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
  rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
  rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
  rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
  rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
  rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
                                         neg_scalar_reg);
  ASSERT_RTX_EQ (series_0_r,
                 simplify_unary_operation (NEG, mode, series_0_nr, mode));
  ASSERT_RTX_EQ (series_r_m1,
                 simplify_unary_operation (NEG, mode, series_nr_1, mode));
  ASSERT_RTX_EQ (series_r_r,
                 simplify_unary_operation (NEG, mode, series_nr_nr, mode));

  /* Test that a VEC_SERIES with a zero step is simplified away.  */
  ASSERT_RTX_EQ (duplicate,
                 simplify_binary_operation (VEC_SERIES, mode,
                                            scalar_reg, const0_rtx));

  /* Test PLUS and MINUS with VEC_SERIES.  */
  rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
  rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
  rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
  ASSERT_RTX_EQ (series_r_r,
                 simplify_binary_operation (PLUS, mode, series_0_r,
                                            duplicate));
  ASSERT_RTX_EQ (series_r_1,
                 simplify_binary_operation (PLUS, mode, duplicate,
                                            series_0_1));
  ASSERT_RTX_EQ (series_r_m1,
                 simplify_binary_operation (PLUS, mode, duplicate,
                                            series_0_m1));
  ASSERT_RTX_EQ (series_0_r,
                 simplify_binary_operation (MINUS, mode, series_r_r,
                                            duplicate));
  ASSERT_RTX_EQ (series_r_m1,
                 simplify_binary_operation (MINUS, mode, duplicate,
                                            series_0_1));
  ASSERT_RTX_EQ (series_r_1,
                 simplify_binary_operation (MINUS, mode, duplicate,
                                            series_0_m1));
  ASSERT_RTX_EQ (series_0_m1,
                 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
                                            constm1_rtx));

  /* Test NEG on constant vector series.  */
  ASSERT_RTX_EQ (series_0_m1,
                 simplify_unary_operation (NEG, mode, series_0_1, mode));
  ASSERT_RTX_EQ (series_0_1,
                 simplify_unary_operation (NEG, mode, series_0_m1, mode));
  /* Test PLUS and MINUS on constant vector series.  */
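  /* With the constants defined below, series_1_1 is { 1, 2, 3, ... },
     series_0_2 is { 0, 2, 4, ... } and series_1_3 is { 1, 4, 7, ... },
     so for instance series_0_1 plus an all-ones vector should give
     series_1_1.  */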
  rtx scalar2 = gen_int_mode (2, inner_mode);
  rtx scalar3 = gen_int_mode (3, inner_mode);
  rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
  rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
  rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
  ASSERT_RTX_EQ (series_1_1,
                 simplify_binary_operation (PLUS, mode, series_0_1,
                                            CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_0_m1,
                 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
                                            series_0_m1));
  ASSERT_RTX_EQ (series_1_3,
                 simplify_binary_operation (PLUS, mode, series_1_1,
                                            series_0_2));
  ASSERT_RTX_EQ (series_0_1,
                 simplify_binary_operation (MINUS, mode, series_1_1,
                                            CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_1_1,
                 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
                                            series_0_m1));
  ASSERT_RTX_EQ (series_1_1,
                 simplify_binary_operation (MINUS, mode, series_1_3,
                                            series_0_2));

  /* Test MULT between constant vectors.  */
  rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
  rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
  rtx scalar9 = gen_int_mode (9, inner_mode);
  rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
  ASSERT_RTX_EQ (series_0_2,
                 simplify_binary_operation (MULT, mode, series_0_1, vec2));
  ASSERT_RTX_EQ (series_3_9,
                 simplify_binary_operation (MULT, mode, vec3, series_1_3));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
                                             series_0_1));

  /* Test ASHIFT between constant vectors.  */
  ASSERT_RTX_EQ (series_0_2,
                 simplify_binary_operation (ASHIFT, mode, series_0_1,
                                            CONST1_RTX (mode)));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
                                             series_0_1));
}

static rtx
simplify_merge_mask (rtx x, rtx mask, int op)
{
  return simplify_context ().simplify_merge_mask (x, mask, op);
}
/* Verify simplify_merge_mask works correctly.  */
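/* Roughly speaking, (vec_merge:M X Y MASK) takes element I from X when
   bit I of MASK is set and from Y otherwise.  simplify_merge_mask (X,
   MASK, OP) tries to simplify X given that only the elements selected
   for operand OP (0 for X, 1 for Y) of such a vec_merge are needed,
   returning NULL_RTX when no simplification is possible.  */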

static void
test_vec_merge (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx op2 = make_test_reg (mode);
  rtx op3 = make_test_reg (mode);
  rtx op4 = make_test_reg (mode);
  rtx op5 = make_test_reg (mode);
  rtx mask1 = make_test_reg (SImode);
  rtx mask2 = make_test_reg (SImode);
  rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
  rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
  rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);

  /* Simple vec_merge.  */
  ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
  ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));

  /* Nested vec_merge.
     It's tempting to make this simplify right down to opN, but we don't
     because all the simplify_* functions assume that the operands have
     already been simplified.  */
  rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
  ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
  ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));

  /* Intermediate unary op.  */
  rtx unop = gen_rtx_NOT (mode, vm1);
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
                 simplify_merge_mask (unop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
                 simplify_merge_mask (unop, mask1, 1));

  /* Intermediate binary op.  */
  rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
                 simplify_merge_mask (binop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
                 simplify_merge_mask (binop, mask1, 1));

  /* Intermediate ternary op.  */
  rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
                 simplify_merge_mask (tenop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
                 simplify_merge_mask (tenop, mask1, 1));

  /* Side effects.  */
  rtx badop0 = gen_rtx_PRE_INC (mode, op0);
  rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
  ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));

  /* Called indirectly.  */
  ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
                 simplify_rtx (nvm));
}
/* Test subregs of integer vector constant X, trying elements in
   the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
   where NELTS is the number of elements in X.  Subregs involving
   elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail.  */
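/* For example, if X is a V4SI constant { 8, 7, 6, 5 } and ELT_BIAS and
   FIRST_VALID are both zero, the V2SI subreg of X at byte offset
   2 * GET_MODE_SIZE (SImode) is expected to be { 6, 5 }.  */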

static void
test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
                           unsigned int first_valid = 0)
{
  machine_mode inner_mode = GET_MODE (x);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);

  for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
    {
      machine_mode outer_mode = (machine_mode) modei;
      if (!VECTOR_MODE_P (outer_mode))
        continue;

      unsigned int outer_nunits;
      if (GET_MODE_INNER (outer_mode) == int_mode
          && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
          && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
        {
          /* Test subregs in which the outer mode is a smaller,
             constant-sized vector of the same element type.  */
          unsigned int limit
            = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
          for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
            {
              rtx expected = NULL_RTX;
              if (elt >= first_valid)
                {
                  rtx_vector_builder builder (outer_mode, outer_nunits, 1);
                  for (unsigned int i = 0; i < outer_nunits; ++i)
                    builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
                  expected = builder.build ();
                }
              poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
              ASSERT_RTX_EQ (expected,
                             simplify_subreg (outer_mode, x,
                                              inner_mode, byte));
            }
        }
      else if (known_eq (GET_MODE_SIZE (outer_mode),
                         GET_MODE_SIZE (inner_mode))
               && known_eq (elt_bias, 0U)
               && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
                   || known_eq (GET_MODE_BITSIZE (outer_mode),
                                GET_MODE_NUNITS (outer_mode)))
               && (!FLOAT_MODE_P (outer_mode)
                   || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
                       == GET_MODE_UNIT_PRECISION (outer_mode)))
               && (GET_MODE_SIZE (inner_mode).is_constant ()
                   || !CONST_VECTOR_STEPPED_P (x)))
        {
          /* Try converting to OUTER_MODE and back.  */
          rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
          ASSERT_TRUE (outer_x != NULL_RTX);
          ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
                                             outer_mode, 0));
        }
    }

  if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
    {
      /* Test each byte in the element range.  */
      unsigned int limit
        = constant_lower_bound (GET_MODE_SIZE (inner_mode));
      for (unsigned int i = 0; i < limit; ++i)
        {
          unsigned int elt = i / GET_MODE_SIZE (int_mode);
          rtx expected = NULL_RTX;
          if (elt >= first_valid)
            {
              unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
              if (BYTES_BIG_ENDIAN)
                byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
              rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
              wide_int shifted_elt
                = wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
              expected = immed_wide_int_const (shifted_elt, QImode);
            }
          poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
          ASSERT_RTX_EQ (expected,
                         simplify_subreg (QImode, x, inner_mode, byte));
        }
    }
}
/* Test constant subregs of integer vector mode INNER_MODE, using 1
   element per pattern.  */

static void
test_vector_subregs_repeating (machine_mode inner_mode)
{
  poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
  unsigned int min_nunits = constant_lower_bound (nunits);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  unsigned int count = gcd (min_nunits, 8);

  rtx_vector_builder builder (inner_mode, count, 1);
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (8 - i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
  if (!nunits.is_constant ())
    test_vector_subregs_modes (x, nunits - min_nunits);
}
/* Test constant subregs of integer vector mode INNER_MODE, using 2
   elements per pattern.  */

static void
test_vector_subregs_fore_back (machine_mode inner_mode)
{
  poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
  unsigned int min_nunits = constant_lower_bound (nunits);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  unsigned int count = gcd (min_nunits, 4);

  rtx_vector_builder builder (inner_mode, count, 2);
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (i, int_mode));
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (-1 - (int) i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
  if (!nunits.is_constant ())
    test_vector_subregs_modes (x, nunits - min_nunits, count);
}
/* Test constant subregs of integer vector mode INNER_MODE, using 3
   elements per pattern.  */

static void
test_vector_subregs_stepped (machine_mode inner_mode)
{
  /* Build { 0, 1, 2, 3, ... }.  */
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  rtx_vector_builder builder (inner_mode, 1, 3);
  for (unsigned int i = 0; i < 3; ++i)
    builder.quick_push (gen_int_mode (i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
}
/* Test constant subregs of integer vector mode INNER_MODE.  */

static void
test_vector_subregs (machine_mode inner_mode)
{
  test_vector_subregs_repeating (inner_mode);
  test_vector_subregs_fore_back (inner_mode);
  test_vector_subregs_stepped (inner_mode);
}
/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
        {
          rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
          test_vector_ops_duplicate (mode, scalar_reg);
          if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
              && maybe_gt (GET_MODE_NUNITS (mode), 2))
            {
              test_vector_ops_series (mode, scalar_reg);
              test_vector_subregs (mode);
            }
          test_vec_merge (mode);
        }
    }
}
template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};
/* Test various CONST_POLY_INT properties.  */
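/* A CONST_POLY_INT built from poly_int64 (A, B) represents the value
   A + B * X for the target's runtime indeterminate X.  For example, x8
   below is 30 + 24 * X, so negating it gives -30 - 24 * X (x9) and
   taking its one's complement gives -31 - 24 * X (x10).  */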

template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller arithmetic
     testing is done directly on poly_ints.  */
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}
/* Run all of the selftests within this file.  */

void
simplify_rtx_cc_tests ()
{
  test_scalar_ops ();
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}

} // namespace selftest

#endif /* CHECKING_P */