/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"
#include "selftest.h"
#include "selftest-rtl.h"
#include "rtx-vector-builder.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
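/* For example, HWI_SIGN_EXTEND applied to a low word whose sign bit
   is set yields HOST_WIDE_INT_M1 (an all-ones high word), while a
   nonnegative low word yields HOST_WIDE_INT_0, mirroring
   two's-complement sign extension of the (low, high) pair.  */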
static bool plus_minus_operand_p (const_rtx);
/* Negate I, which satisfies poly_int_rtx_p.  MODE is the mode of I.  */

static rtx
neg_poly_int_rtx (machine_mode mode, const_rtx i)
{
  return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
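/* For example, with 32-bit SImode, mode_signbit_p (SImode, x) holds
   only for a constant whose low 32 bits are exactly 0x80000000;
   values such as 0x40000000 or 0x80000001 are rejected.  */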
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
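/* For example, with 8-bit QImode: val_signbit_p (QImode, 0x80) is
   true, val_signbit_known_set_p (QImode, 0x90) is true (bit 7 is
   set), and val_signbit_known_clear_p (QImode, 0x70) is true (bit 7
   is clear).  */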
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
                                       rtx op0, rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
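/* Example behavior (with X any pseudo register):

     simplify_gen_binary (PLUS, SImode, x, const0_rtx)

   folds and returns X itself, while

     simplify_gen_binary (PLUS, SImode, const1_rtx, x)

   cannot fold further, so the commutative operands are reordered and
   (plus:SI x (const_int 1)) is generated.  */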
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
        return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}
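/* For example, a load such as (mem/u/c:SF (symbol_ref ("*.LC0")))
   whose pool entry holds the value 1.0 is replaced by the
   corresponding CONST_DOUBLE, so callers can fold straight through
   constant pool references.  ("*.LC0" is just an illustrative label
   name.)  */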
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      poly_int64 offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep);
            if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
                || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
                || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
              decl = NULL;
            else
              offset += bytepos + toffset_val;
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && VAR_P (decl)
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);
              poly_int64 n_offset, o_offset;

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              n = strip_offset (n, &n_offset);
              o = strip_offset (o, &o_offset);
              if (!(known_eq (o_offset, n_offset + offset)
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && known_eq (offset, 0))
            x = newx;
        }
    }

  return x;
}
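/* As an illustration: for a MEM whose MEM_EXPR is a static VAR_DECL
   with a MEM for its DECL_RTL, the code above either keeps X (when the
   two addresses already agree once offsets are stripped) or rebuilds
   the access with adjust_address_nv at the decl's address plus the
   known offset.  */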
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
                                      machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
                                        machine_mode op0_mode,
                                        rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
                                         op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
                                           machine_mode cmp_mode,
                                           rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
                                            op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (UNLIKELY (fn != NULL))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */
rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
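/* A sketch of a typical use (REG here stands for any pre-built
   pseudo-register rtx):

     rtx addr = gen_rtx_PLUS (Pmode, reg, GEN_INT (4));
     rtx folded = simplify_replace_rtx (addr, reg, GEN_INT (12));

   leaves FOLDED as (const_int 16) rather than
   (plus (const_int 12) (const_int 4)).  */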
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

rtx
simplify_context::simplify_truncation (machine_mode mode, rtx op,
                                       machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
          || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
          /* If doing this transform works for an X with all bits set,
             it works for any X.  */
          && ((GET_MODE_MASK (mode) >> shift) & mask)
             == ((GET_MODE_MASK (op_mode) >> shift) & mask)
          && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
          && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
        {
          mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
          return simplify_gen_binary (AND, mode, op0, mask_op);
        }
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            {
              pos -= op_precision - precision;
              return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                           XEXP (op, 1), GEN_INT (pos));
            }
        }
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                         XEXP (op, 1), XEXP (op, 2));
        }
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* Simplifications of (truncate:A (subreg:B X 0)).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && subreg_lowpart_p (op))
    {
      /* (truncate:A (subreg:B (truncate:C X) 0)) is (truncate:A X).  */
      if (GET_CODE (SUBREG_REG (op)) == TRUNCATE)
        {
          rtx inner = XEXP (SUBREG_REG (op), 0);
          if (GET_MODE_PRECISION (int_mode)
              <= GET_MODE_PRECISION (subreg_mode))
            return simplify_gen_unary (TRUNCATE, int_mode, inner,
                                       GET_MODE (inner));
          else
            /* If subreg above is paradoxical and C is narrower
               than A, return (subreg:A (truncate:C X) 0).  */
            return simplify_gen_subreg (int_mode, SUBREG_REG (op),
                                        subreg_mode, 0);
        }

      /* Simplifications of (truncate:A (subreg:B X:C 0)) with
         paradoxical subregs (B is wider than C).  */
      if (is_a <scalar_int_mode> (op_mode, &int_op_mode))
        {
          unsigned int int_op_prec = GET_MODE_PRECISION (int_op_mode);
          unsigned int subreg_prec = GET_MODE_PRECISION (subreg_mode);
          if (int_op_prec > subreg_prec)
            {
              if (int_mode == subreg_mode)
                return SUBREG_REG (op);
              if (GET_MODE_PRECISION (int_mode) < subreg_prec)
                return simplify_gen_unary (TRUNCATE, int_mode,
                                           SUBREG_REG (op), subreg_mode);
            }
          /* Simplification of (truncate:A (subreg:B X:C 0)) where
             A is narrower than B and B is narrower than C.  */
          else if (int_op_prec < subreg_prec
                   && GET_MODE_PRECISION (int_mode) < int_op_prec)
            return simplify_gen_unary (TRUNCATE, int_mode,
                                       SUBREG_REG (op), subreg_mode);
        }
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
                                            rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants can reach here with -frounding-math, if they do then
     the conversion isn't exact.  */
  if (op0_mode == VOIDmode)
    return false;
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
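/* For instance, (float:DF (reg:SI x)) is always exact, since DFmode
   has a 53-bit significand and SImode only 32 bits, whereas
   (float:SF (reg:SI x)) is exact only when nonzero_bits or
   num_sign_bit_copies show that at most 24 significant bits remain.  */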
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
rtx
simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
                                              rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
         modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
         and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && is_a <scalar_int_mode> (mode, &int_mode)
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
        return simplify_gen_relational (GE, int_mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If comparison is not reversible use
         x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
              || (GET_CODE (false_rtx) == NEG
                  && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
        {
          int_mode = as_a <scalar_int_mode> (mode);
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          gen_int_shift_amount (inner,
                                                                isize - 1));
              if (int_mode == inner)
                return temp;
              if (GET_MODE_PRECISION (int_mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          gen_int_shift_amount (inner,
                                                                isize - 1));
              if (int_mode == inner)
                return temp;
              if (GET_MODE_PRECISION (int_mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
            }
        }

      if (vec_series_p (op, &base, &step))
        {
          /* Only create a new series if we can simplify both parts.  In other
             cases this isn't really a simplification, and it's not necessarily
             a win to replace a vector operation with a scalar operation.  */
          scalar_mode inner_mode = GET_MODE_INNER (mode);
          base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
          if (base)
            {
              step = simplify_unary_operation (NEG, inner_mode,
                                               step, inner_mode);
              if (step)
                return gen_vec_series (mode, base, step);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (known_eq (GET_MODE_NUNITS (mode), 1)
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
          && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Check for useless truncation.  */
      if (GET_MODE (op) == mode)
        return op;
      break;

    case FLOAT_TRUNCATE:
      /* Check for useless truncation.  */
      if (GET_MODE (op) == mode)
        return op;

      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_UNIT_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
        return op;

      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
         */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
          && (num_sign_bit_copies (op, int_mode)
              == GET_MODE_PRECISION (int_mode)))
        return gen_rtx_NEG (int_mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        case PARITY:
          /* (parity (parity x)) -> parity (x).  */
          return op;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
        return op;

      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = (GET_MODE_UNIT_PRECISION (lmode)
                        - INTVAL (XEXP (lhs, 1)));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += (GET_MODE_UNIT_PRECISION (rmode)
                         - INTVAL (XEXP (rhs, 1)));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

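      /* A concrete instance of the canonicalization above:

           (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI a))
                                    (sign_extend:SI (reg:HI b))))

         becomes

           (mult:DI (sign_extend:DI (reg:HI a))
                    (sign_extend:DI (reg:HI b)))

         because a 16x16->32 bit product cannot overflow SImode, so the
         DImode product of the wider extensions is the same value.  */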
      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op))
        {
          rtx subreg = SUBREG_REG (op);
          machine_mode subreg_mode = GET_MODE (subreg);
          if (!paradoxical_subreg_p (mode, subreg_mode))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
              if (temp)
                {
                  /* Preserve SUBREG_PROMOTED_VAR_P.  */
                  if (partial_subreg_p (temp))
                    {
                      SUBREG_PROMOTED_VAR_P (temp) = 1;
                      SUBREG_PROMOTED_SET (temp, SRP_SIGNED);
                    }
                  return temp;
                }
            }
          else
            /* Sign-extending a sign-extended subreg.  */
            return simplify_gen_unary (SIGN_EXTEND, mode,
                                       subreg, subreg_mode);
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_UNIT_PRECISION (mode)
                      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          gcc_assert (GET_MODE_PRECISION (int_mode)
                      > GET_MODE_PRECISION (op_mode));
          if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           int_mode, inner, tmode);
            }
        }

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (op, 1) != const0_rtx)
        return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

      /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
         I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
         (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
         (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
         O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
         wider than O.  */
      if (GET_CODE (op) == TRUNCATE
          && GET_CODE (XEXP (op, 0)) == LSHIFTRT
          && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
        {
          scalar_int_mode m_mode, n_mode, o_mode;
          rtx old_shift = XEXP (op, 0);
          if (is_a <scalar_int_mode> (mode, &m_mode)
              && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
              && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
              && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
                 == INTVAL (XEXP (old_shift, 1)))
            {
              rtx new_shift = simplify_gen_binary (ASHIFTRT,
                                                   GET_MODE (old_shift),
                                                   XEXP (old_shift, 0),
                                                   XEXP (old_shift, 1));
              if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
                return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
                                           GET_MODE (new_shift));
              if (mode != GET_MODE (new_shift))
                return simplify_gen_unary (TRUNCATE, mode, new_shift,
                                           GET_MODE (new_shift));
              return new_shift;
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    case ZERO_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
        return op;

      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op))
        {
          rtx subreg = SUBREG_REG (op);
          machine_mode subreg_mode = GET_MODE (subreg);
          if (!paradoxical_subreg_p (mode, subreg_mode))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
              if (temp)
                {
                  /* Preserve SUBREG_PROMOTED_VAR_P.  */
                  if (partial_subreg_p (temp))
                    {
                      SUBREG_PROMOTED_VAR_P (temp) = 1;
                      SUBREG_PROMOTED_SET (temp, SRP_UNSIGNED);
                    }
                  return temp;
                }
            }
          else
            /* Zero-extending a zero-extended subreg.  */
            return simplify_gen_unary (ZERO_EXTEND, mode,
                                       subreg, subreg_mode);
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = (GET_MODE_UNIT_PRECISION (lmode)
                        - INTVAL (XEXP (lhs, 1)));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += (GET_MODE_UNIT_PRECISION (rmode)
                         - INTVAL (XEXP (rhs, 1)));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, int_mode,
                                           inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
          && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), op0_mode)
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
                                     op0_mode);
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    default:
      break;
    }

  if (VECTOR_MODE_P (mode)
      && vec_duplicate_p (op, &elt)
      && code != VEC_DUPLICATE)
    {
      if (code == SIGN_EXTEND || code == ZERO_EXTEND)
        /* Enforce a canonical order of VEC_DUPLICATE wrt other unary
           operations by promoting VEC_DUPLICATE to the root of the expression
           (as far as possible).  */
        temp = simplify_gen_unary (code, GET_MODE_INNER (mode),
                                   elt, GET_MODE_INNER (GET_MODE (op)));
      else
        /* Try applying the operator to ELT and see if that simplifies.
           We can duplicate the result if so.

           The reason we traditionally haven't used simplify_gen_unary
           for these codes is that it didn't necessarily seem to be a
           win to convert things like:

             (neg:V (vec_duplicate:V (reg:S R)))

           to:

             (vec_duplicate:V (neg:S (reg:S R)))

           The first might be done entirely in vector registers while the
           second might need a move between register files.

           However, there are also cases where promoting the vec_duplicate is
           more efficient, and there is definite value in having a canonical
           form when matching instruction patterns.  We should consider
           extending the simplify_gen_unary code above to more cases.  */
        temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                         elt, GET_MODE_INNER (GET_MODE (op)));
      if (temp)
        return gen_vec_duplicate (mode, temp);
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
                                rtx op, machine_mode op_mode)
{
  scalar_int_mode result_mode;

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
        return gen_const_vec_duplicate (mode, op);
      if (GET_CODE (op) == CONST_VECTOR
          && (CONST_VECTOR_DUPLICATE_P (op)
              || CONST_VECTOR_NUNITS (op).is_constant ()))
        {
          unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
                                    ? CONST_VECTOR_NPATTERNS (op)
                                    : CONST_VECTOR_NUNITS (op).to_constant ());
          gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
          rtx_vector_builder builder (mode, npatterns, 1);
          for (unsigned i = 0; i < npatterns; i++)
            builder.quick_push (CONST_VECTOR_ELT (op, i));
          return builder.build ();
        }
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (op) == CONST_VECTOR
      && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
    {
      gcc_assert (GET_MODE (op) == op_mode);

      rtx_vector_builder builder;
      if (!builder.new_unary_operation (mode, op, false))
        return 0;

      unsigned int count = builder.encoded_nelts ();
      for (unsigned int i = 0; i < count; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (op_mode));
          if (!x || !valid_for_const_vector_p (mode, x))
            return 0;
          builder.quick_push (x);
        }
      return builder.build ();
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
        return 0;

      d = real_value_truncate (mode, d);

      /* Avoid the folding if flag_rounding_math is on and the
         conversion is not exact.  */
      if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          bool fail = false;
          wide_int w = real_to_integer (&d, &fail,
                                        GET_MODE_PRECISION
                                          (as_a <scalar_int_mode> (op_mode)));
          if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
            return 0;
        }

      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
        return 0;

      d = real_value_truncate (mode, d);

      /* Avoid the folding if flag_rounding_math is on and the
         conversion is not exact.  */
      if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          bool fail = false;
          wide_int w = real_to_integer (&d, &fail,
                                        GET_MODE_PRECISION
                                          (as_a <scalar_int_mode> (op_mode)));
          if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
            return 0;
        }

      return const_double_from_real_value (d, mode);
    }
);
  if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
	return 0;

      wide_int result;
      scalar_int_mode imode = (op_mode == VOIDmode
			       ? result_mode
			       : as_a <scalar_int_mode> (op_mode));
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, and so if you added this to the test
	 above, the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), result_mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), result_mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), result_mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), result_mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	case SS_NEG:
	  if (wi::only_sign_bit_p (op0))
	    result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
	  else
	    result = wi::neg (op0);
	  break;

	case SS_ABS:
	  if (wi::only_sign_bit_p (op0))
	    result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
	  else
	    result = wi::abs (op0);
	  break;

	default:
	  return 0;
	}

      return immed_wide_int_const (result, result_mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);

      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* Or if flag_rounding_math is on and the truncation is not
	     exact.  */
	  if (HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	      && !exact_real_truncate (mode, &d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4], i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
	return 0;

      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the ABI of real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }
  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
	{
	case NEG:
	  result = -const_poly_int_value (op);
	  break;

	case NOT:
	  result = ~const_poly_int_value (op);
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
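/* Editorial illustration, not part of the original file: the FIX case
   above saturates out-of-range inputs and maps NaN to zero, matching
   the middle-end's constant-folding semantics.  A standalone sketch of
   the same behaviour for double -> 32-bit signed int (the function
   name is hypothetical, and an int of at least 32 bits is assumed):  */

static int
demo_fix_trunc_semantics (double d)
{
  const int lo = -2147483647 - 1, hi = 2147483647;
  if (d != d)			/* NaN -> 0, as in the FIX case.  */
    return 0;
  if (d > (double) hi)		/* Clamp to the signed upper bound.  */
    return hi;
  if (d < (double) lo)		/* Clamp to the signed lower bound.  */
    return lo;
  return (int) d;		/* Otherwise truncate toward zero.  */
}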
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

rtx
simplify_context::simplify_byte_swapping_operation (rtx_code code,
						    machine_mode mode,
						    rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

rtx
simplify_context::simplify_associative_operation (rtx_code code,
						  machine_mode mode,
						  rtx op0, rtx op1)
{
  rtx tem;

  /* Normally expressions simplified by simplify-rtx.cc are combined
     at most from a few machine instructions and therefore the
     expressions should be fairly small.  During var-tracking
     we can see arbitrarily large expressions though, and reassociating
     those can be quadratic, so punt after encountering max_assoc_count
     simplify_associative_operation calls during the outermost simplify_*
     call.  */
  if (++assoc_count >= max_assoc_count)
    return NULL_RTX;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
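/* Editorial illustration, not part of the original file: left
   linearization plus the "(a op b) op c -> a op (b op c)" attempt is
   what lets nested constant operands fold, e.g. (plus (plus x 3) 5)
   becomes (plus x 8).  A standalone check of the underlying algebra on
   wrapping unsigned arithmetic:  */

static int
demo_reassociation_folds_constants (unsigned int x)
{
  return ((x + 3u) + 5u) == (x + (3u + 5u));	/* Both are x + 8.  */
}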
/* Return a mask describing the COMPARISON.  Each of the four primitive
   outcomes LT, GT, EQ and UNORDERED gets one bit, so a compound code is
   the inclusive or of its constituents and a full mask means "always".  */
static int
comparison_to_mask (enum rtx_code comparison)
{
  switch (comparison)
    {
    case LT: return 8;
    case GT: return 4;
    case EQ: return 2;
    case UNORDERED: return 1;

    case LTGT: return 12;
    case LE: return 10;
    case GE: return 6;
    case UNLT: return 9;
    case UNGT: return 5;
    case UNEQ: return 3;

    case ORDERED: return 14;
    case NE: return 13;
    case UNLE: return 11;
    case UNGE: return 7;

    default: gcc_unreachable ();
    }
}

/* Return a comparison corresponding to the MASK.  */
static enum rtx_code
mask_to_comparison (int mask)
{
  switch (mask)
    {
    case 8: return LT;
    case 4: return GT;
    case 2: return EQ;
    case 1: return UNORDERED;

    case 12: return LTGT;
    case 10: return LE;
    case 6: return GE;
    case 9: return UNLT;
    case 5: return UNGT;
    case 3: return UNEQ;

    case 14: return ORDERED;
    case 13: return NE;
    case 11: return UNLE;
    case 7: return UNGE;

    default: gcc_unreachable ();
    }
}
/* Return true if CODE is valid for comparisons of mode MODE, false
   otherwise.

   It is always safe to return false, even if the code was valid for the
   given mode, as that will merely suppress optimizations.  */

static bool
comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
{
  switch (code)
    {
      /* These are valid for integral, floating and vector modes.  */
      case NE:
      case EQ:
      case GE:
      case GT:
      case LE:
      case LT:
	return (INTEGRAL_MODE_P (mode)
		|| FLOAT_MODE_P (mode)
		|| VECTOR_MODE_P (mode));

      /* These are valid for floating point modes.  */
      case LTGT:
      case UNORDERED:
      case ORDERED:
      case UNEQ:
      case UNGE:
      case UNGT:
      case UNLE:
      case UNLT:
	return FLOAT_MODE_P (mode);

      /* These are filtered out in simplify_logical_operation, but
	 we check for them too as a matter of safety.  They are valid
	 for integral and vector modes.  */
      case GEU:
      case GTU:
      case LEU:
      case LTU:
	return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);

      default:
	gcc_unreachable ();
    }
}
/* Canonicalize RES, a scalar const0_rtx/const_true_rtx, to the right
   false/true value of a comparison with MODE whose comparison operands
   have CMP_MODE.  */

static rtx
relational_result (machine_mode mode, machine_mode cmp_mode, rtx res)
{
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      if (res == const0_rtx)
	return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
      REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode);
      return const_double_from_real_value (val, mode);
#else
      return NULL_RTX;
#endif
    }
  if (VECTOR_MODE_P (mode))
    {
      if (res == const0_rtx)
	return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
      rtx val = VECTOR_STORE_FLAG_VALUE (mode);
      if (val == NULL_RTX)
	return NULL_RTX;
      if (val == const1_rtx)
	return CONST1_RTX (mode);

      return gen_const_vec_duplicate (mode, val);
#else
      return NULL_RTX;
#endif
    }
  /* For a vector comparison with a scalar int result, it is unknown
     if the target means here a comparison into an integral bitmask,
     or a comparison where all comparisons true mean const_true_rtx
     whole result, or where any comparison true means const_true_rtx
     whole result.  For const0_rtx all the cases are the same.  */
  if (VECTOR_MODE_P (cmp_mode)
      && SCALAR_INT_MODE_P (mode)
      && res == const_true_rtx)
    return NULL_RTX;

  return res;
}
/* Simplify a logical operation CODE with result mode MODE, operating on OP0
   and OP1, which should both be relational operations.  Return 0 if no such
   simplification is possible.  */
rtx
simplify_context::simplify_logical_relational_operation (rtx_code code,
							 machine_mode mode,
							 rtx op0, rtx op1)
{
  /* We only handle IOR of two relational operations.  */
  if (code != IOR)
    return 0;

  if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
    return 0;

  if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	&& rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
    return 0;

  enum rtx_code code0 = GET_CODE (op0);
  enum rtx_code code1 = GET_CODE (op1);

  /* We don't handle unsigned comparisons currently.  */
  if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
    return 0;
  if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
    return 0;

  int mask0 = comparison_to_mask (code0);
  int mask1 = comparison_to_mask (code1);

  int mask = mask0 | mask1;

  if (mask == 15)
    return relational_result (mode, GET_MODE (op0), const_true_rtx);

  code = mask_to_comparison (mask);

  /* Many comparison codes are only valid for certain mode classes.  */
  if (!comparison_code_valid_for_mode (code, mode))
    return 0;

  op0 = XEXP (op1, 0);
  op1 = XEXP (op1, 1);

  return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
}
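/* Editorial illustration, not part of the original file: combining the
   outcome masks means e.g. that (ior (lt x y) (eq x y)) folds into
   (le x y).  The underlying logic, checked standalone:  */

static int
demo_ior_of_lt_and_eq_is_le (int x, int y)
{
  return ((x < y) || (x == y)) == (x <= y);
}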
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
					     rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

rtx
simplify_context::simplify_binary_operation_series (rtx_code code,
						    machine_mode mode,
						    rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
/* Subroutine of simplify_binary_operation_1.  Un-distribute a binary
   operation CODE with result mode MODE, operating on OP0 and OP1.
   E.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
   Returns NULL_RTX if no simplification is possible.  */

rtx
simplify_context::simplify_distributive_operation (rtx_code code,
						   machine_mode mode,
						   rtx op0, rtx op1)
{
  enum rtx_code op = GET_CODE (op0);
  gcc_assert (GET_CODE (op1) == op);

  if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
      && ! side_effects_p (XEXP (op0, 1)))
    return simplify_gen_binary (op, mode,
				simplify_gen_binary (code, mode,
						     XEXP (op0, 0),
						     XEXP (op1, 0)),
				XEXP (op0, 1));

  if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
    {
      if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && ! side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (op, mode,
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 1),
							 XEXP (op1, 1)),
				    XEXP (op0, 0));
      if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
	  && ! side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (op, mode,
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 1),
							 XEXP (op1, 0)),
				    XEXP (op0, 0));
      if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return simplify_gen_binary (op, mode,
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 0),
							 XEXP (op1, 1)),
				    XEXP (op0, 1));
    }

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

rtx
simplify_context::simplify_binary_operation_1 (rtx_code code,
					       machine_mode mode,
					       rtx op0, rtx op1,
					       rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;
  poly_int64 offset;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
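      /* The rule above is the two's complement identity -a == ~a + 1:
	 e.g. in QImode with a == 5, ~a is 0xfa and ~a + 1 is 0xfb,
	 which is -5.  */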
      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, offset);
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && poly_int_rtx_p (op0, &offset))
	return plus_constant (mode, op1, offset);

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);

	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
	      return (set_src_cost (tem, int_mode, speed)
		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
	    }

	  /* Optimize (X - 1) * Y + Y to X * Y.  */
	  lhs = op0;
	  rhs = op1;
	  if (GET_CODE (op0) == MULT)
	    {
	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
		    && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
		   || (GET_CODE (XEXP (op0, 0)) == MINUS
		       && XEXP (XEXP (op0, 0), 1) == const1_rtx))
		  && rtx_equal_p (XEXP (op0, 1), op1))
		lhs = XEXP (XEXP (op0, 0), 0);
	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
			 && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
			|| (GET_CODE (XEXP (op0, 1)) == MINUS
			    && XEXP (XEXP (op0, 1), 1) == const1_rtx))
		       && rtx_equal_p (XEXP (op0, 0), op1))
		lhs = XEXP (XEXP (op0, 1), 0);
	    }
	  else if (GET_CODE (op1) == MULT)
	    {
	      if (((GET_CODE (XEXP (op1, 0)) == PLUS
		    && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
		   || (GET_CODE (XEXP (op1, 0)) == MINUS
		       && XEXP (XEXP (op1, 0), 1) == const1_rtx))
		  && rtx_equal_p (XEXP (op1, 1), op0))
		rhs = XEXP (XEXP (op1, 0), 0);
	      else if (((GET_CODE (XEXP (op1, 1)) == PLUS
			 && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
			|| (GET_CODE (XEXP (op1, 1)) == MINUS
			    && XEXP (XEXP (op1, 1), 1) == const1_rtx))
		       && rtx_equal_p (XEXP (op1, 0), op0))
		rhs = XEXP (XEXP (op1, 1), 0);
	    }
	  if (lhs != op0 || rhs != op1)
	    return simplify_gen_binary (MULT, int_mode, lhs, rhs);
	}
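      /* The scalar_int_mode block above turns e.g. (plus (ashift x 2) x)
	 into (mult x 5): the shift contributes coefficient 4, the bare x
	 coefficient 1, and 4 + 1 folds to 5 in wrapping arithmetic.  The
	 set_src_cost test keeps the result only when it is no more
	 expensive than the original expression.  */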
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  tem = simplify_binary_operation_series (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

	  if (REG_P (xop00) && REG_P (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE (xop00) == mode
	      && GET_MODE (xop10) == mode
	      && GET_MODE_CLASS (mode) == MODE_CC)
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
	 constants, in which case not retaining additions and
	 subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
	  && !contains_symbolic_reference_p (op1))
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signaling NaNs,
	 or has signed zeros and supports rounding towards -infinity.
	 In such a case, 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && !HONOR_SNANS (mode)
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (int_mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);

	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
	      return (set_src_cost (tem, int_mode, speed)
		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
	    }
	  /* Optimize (X + 1) * Y - Y to X * Y.  */
	  lhs = op0;
	  if (GET_CODE (op0) == MULT)
	    {
	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
		    && XEXP (XEXP (op0, 0), 1) == const1_rtx)
		   || (GET_CODE (XEXP (op0, 0)) == MINUS
		       && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
		  && rtx_equal_p (XEXP (op0, 1), op1))
		lhs = XEXP (XEXP (op0, 0), 0);
	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
			 && XEXP (XEXP (op0, 1), 1) == const1_rtx)
			|| (GET_CODE (XEXP (op0, 1)) == MINUS
			    && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
		       && rtx_equal_p (XEXP (op0, 0), op1))
		lhs = XEXP (XEXP (op0, 1), 0);
	    }
	  if (lhs != op0)
	    return simplify_gen_binary (MULT, int_mode, lhs, op1);
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));

      /* Don't let a relocatable value get a negative coeff.  */
      if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_poly_int_rtx (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
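      /* E.g. with x = 0b1101 and y = 0b0110: x & y = 0b0100, so
	 x - (x & y) = 0b1001, which is exactly x & ~y.  Subtracting
	 bits that are known to be set in x can never borrow, so it is
	 the same as clearing them.  */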
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  tem = simplify_binary_operation_series (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signaling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0,
					gen_int_shift_amount (mode, val));
	}
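      /* E.g. (mult x 8) becomes (ashift x 3), since 8 == 1 << 3 and a
	 left shift by 3 multiplies by 8 in wrapping arithmetic.  */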
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	  if (real_equal (d1, &dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && real_equal (d1, &dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
	}

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_UNIT_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
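      /* E.g. in SImode, (ior (ashift x 24) (lshiftrt x 8)) is the
	 classic rotate idiom: 24 + 8 == 32, so this becomes
	 (rotate x 24).  */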
      /* Same, but for an ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
				     &inner_mode)
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
	  && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
	      + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      /* The following happens with bitfield merging.
	 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
      if (GET_CODE (op0) == AND
	  && GET_CODE (op1) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (XEXP (op1, 1))
	  && (INTVAL (XEXP (op0, 1))
	      == ~INTVAL (XEXP (op1, 1))))
	{
	  /* The IOR may be on both sides.  */
	  rtx top0 = NULL_RTX, top1 = NULL_RTX;
	  if (GET_CODE (XEXP (op1, 0)) == IOR)
	    top0 = op0, top1 = op1;
	  else if (GET_CODE (XEXP (op0, 0)) == IOR)
	    top0 = op1, top1 = op0;
	  if (top0 && top1)
	    {
	      /* X may be on either side of the inner IOR.  */
	      rtx tem = NULL_RTX;
	      if (rtx_equal_p (XEXP (top0, 0),
			       XEXP (XEXP (top1, 0), 0)))
		tem = XEXP (XEXP (top1, 0), 1);
	      else if (rtx_equal_p (XEXP (top0, 0),
				    XEXP (XEXP (top1, 0), 1)))
		tem = XEXP (XEXP (top1, 0), 0);
	      if (tem)
		return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
					    simplify_gen_binary
					      (AND, mode, tem,
					       XEXP (top1, 1)));
	    }
	}

      /* Convert (ior (and A C) (and B C)) into (and (ior A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == IOR
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_logical_relational_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
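      /* E.g. if op0 can only set the low byte and op1 only the high
	 byte, each result bit receives at most one 1, so XOR and IOR
	 coincide: 0x0034 ^ 0x1200 == 0x0034 | 0x1200 == 0x1234.  */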
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode,
							  op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
	 out bits inverted twice and not set by C.  Similarly, given
	 (xor (and (xor A B) C) D), simplify without inverting C in
	 the xor operand: (xor (and A C) (B&C)^D).  */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (op1)
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
	{
	  enum rtx_code op = GET_CODE (op0);
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx d = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);
	  HOST_WIDE_INT dval = INTVAL (d);
	  HOST_WIDE_INT xcval;

	  if (op == IOR)
	    xcval = ~cval;
	  else
	    xcval = cval;

	  return simplify_gen_binary (XOR, mode,
				      simplify_gen_binary (op, mode, a, c),
				      gen_int_mode ((bval & xcval) ^ dval,
						    mode));
	}

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  /* Instead of computing ~A&C, we compute its negated value,
	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
						  GEN_INT (~cval));
	  if ((~cval & bval) == 0)
	    {
	      rtx na_c = NULL_RTX;
	      if (n_na_c)
		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
	      else
		{
		  /* If ~A does not simplify, don't bother: we don't
		     want to simplify 2 operations into 3, and if na_c
		     were to simplify with na, n_na_c would have
		     simplified as well.  */
		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
		  if (na)
		    na_c = simplify_gen_binary (AND, mode, na, c);
		}

	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval,
							  mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (n_na_c == CONSTM1_RTX (mode))
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}

      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
	 do (ior (and A ~C) (and B C)) which is a machine instruction on some
	 machines, and also has a shorter instruction path length.  */
      if (GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && CONST_INT_P (XEXP (op0, 1))
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
	{
	  rtx a = trueop1;
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
	  rtx bc = simplify_gen_binary (AND, mode, b, c);
	  return simplify_gen_binary (IOR, mode, a_nc, bc);
	}
      /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
      else if (GET_CODE (op0) == AND
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (XEXP (op0, 1))
	       && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
	{
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = trueop1;
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
	  rtx ac = simplify_gen_binary (AND, mode, a, c);
	  return simplify_gen_binary (IOR, mode, ac, b_nc);
	}

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, int_mode)))
	return reversed;

      /* Convert (xor (and A C) (and B C)) into (and (xor A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
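      /* E.g. with M == 7 and N == 8: carries in an addition only
	 propagate upward, so bits of N outside the low mask M can never
	 influence the masked result; ((a | 8) + b) & 7 is therefore the
	 same as (a + b) & 7.  */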
      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X))) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 1)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 1)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == IOR
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode)
          && !cfun->can_throw_non_call_exceptions)
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        {
          tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
          if (tem)
            return tem;
        }
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0,
                                    gen_int_shift_amount (mode, val));
      break;
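      /* E.g. an unsigned divide by 8 becomes a logical right shift by 3
         (an expository sketch):
           (udiv:SI (reg:SI x) (const_int 8))
           -> (lshiftrt:SI (reg:SI x) (const_int 3)).  */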
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0.  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
              && trueop1 != CONST0_RTX (mode))
            {
              const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

              /* x/-1.0 is -x.  */
              if (real_equal (d1, &dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !real_equal (d1, &dconst0))
                {
                  REAL_VALUE_TYPE d;
                  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
                  tem = const_double_from_real_value (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            {
              tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (tem)
                return tem;
            }
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (x)
                return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
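      /* As an illustration of the -freciprocal-math case above:
           (div:DF (reg:DF x) (const_double 4.0))
           -> (mult:DF (reg:DF x) (const_double 0.25)).
         The computed reciprocal need not be exactly representable,
         which is why the transformation is gated on the flag.  */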
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    gen_int_mode (UINTVAL (trueop1) - 1,
                                                  mode));
      break;
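      /* E.g. for unsigned x, x % 8 == x & 7, so
           (umod:SI (reg:SI x) (const_int 8))
           -> (and:SI (reg:SI x) (const_int 7)).  */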
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
    case ROTATERT:
    case ROTATE:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
                       GET_MODE_UNIT_PRECISION (mode) - 1))
        {
          int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
          rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
          return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                      mode, op0, new_amount_rtx);
        }
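      /* E.g. in SImode, rotating left by 31 is the same as rotating
         right by 1, so (rotate:SI x (const_int 31)) is canonicalized to
         (rotatert:SI x (const_int 1)) when both directions are
         available.  */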
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0)
          && HWI_COMPUTABLE_MODE_P (mode)
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

      /* Given:
         scalar modes M1, M2
         scalar constants c1, c2
         size (M2) > size (M1)
         c1 == size (M2) - size (M1)
         optimize:
         ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
                          <low_part>)
                      (const_int <c2>))
         to:
         (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
                    <low_part>).  */
      if ((code == ASHIFTRT || code == LSHIFTRT)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && SUBREG_P (op0)
          && CONST_INT_P (op1)
          && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
                                     &inner_mode)
          && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
          && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
          && (INTVAL (XEXP (SUBREG_REG (op0), 1))
              == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
          && subreg_lowpart_p (op0))
        {
          rtx tmp = gen_int_shift_amount
            (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));

          /* Combine would usually zero out the value when combining two
             shifts and the range becomes larger or equal to the mode.
             However since we fold away one of the shifts here combine won't
             see it so we should immediately zero the result if it's out of
             range.  */
          if (code == LSHIFTRT
              && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
            tmp = const0_rtx;
          else
            tmp = simplify_gen_binary (code,
                                       inner_mode,
                                       XEXP (SUBREG_REG (op0), 0),
                                       tmp);

          return lowpart_subreg (int_mode, tmp, inner_mode);
        }
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0,
                                        gen_int_shift_amount (mode, val));
        }
      break;
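      /* E.g. on a SHIFT_COUNT_TRUNCATED target, an SImode shift by 33
         behaves like a shift by 33 & 31 == 1, so the constant amount is
         reduced accordingly.  */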
    case SS_ASHIFT:
      if (CONST_INT_P (trueop0)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (UINTVAL (trueop0) == (GET_MODE_MASK (mode) >> 1)
              || mode_signbit_p (mode, trueop0))
          && ! side_effects_p (op1))
        return op0;
      goto simplify_ashift;

    case US_ASHIFT:
      if (CONST_INT_P (trueop0)
          && HWI_COMPUTABLE_MODE_P (mode)
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
      goto simplify_ashift;

    case ASHIFT:
    simplify_ashift:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      if (mem_depth
          && CONST_INT_P (trueop1)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && IN_RANGE (UINTVAL (trueop1),
                       1, GET_MODE_PRECISION (int_mode) - 1))
        {
          auto c = (wi::one (GET_MODE_PRECISION (int_mode))
                    << UINTVAL (trueop1));
          rtx new_op1 = immed_wide_int_const (c, int_mode);
          return simplify_gen_binary (MULT, int_mode, op0, new_op1);
        }
      goto canonicalize_shift;
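      /* E.g. when simplifying inside a MEM address (mem_depth != 0),
         (ashift:SI (reg:SI x) (const_int 2)) is rewritten as
         (mult:SI (reg:SI x) (const_int 4)), since MULT is the
         canonical form for scaling inside addresses.  */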
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
        {
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
              && zero_val == GET_MODE_PRECISION (inner_mode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, inner_mode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;
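      /* E.g. on a target where CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
         SImode and STORE_FLAG_VALUE is 1: clz of a 32-bit value is 32
         only when the value is zero, and only then does bit 5 of the
         result survive a shift right by 5, so
           (lshiftrt:SI (clz:SI (reg:SI x)) (const_int 5))
           -> (eq:SI (reg:SI x) (const_int 0)).  */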
    case SMIN:
      if (HWI_COMPUTABLE_MODE_P (mode)
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (HWI_COMPUTABLE_MODE_P (mode)
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
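      /* These are the absorbing elements of each operation: e.g. the
         signed minimum of x and the most negative value (the sign-bit
         constant) is always that constant, and the unsigned maximum of
         x and all-ones (constm1_rtx) is always all-ones, so both fold
         to op1 when op0 has no side effects.  */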
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* Simplify x +/- 0 to x, if possible.  */
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      return 0;

    case SS_MULT:
    case US_MULT:
      /* Simplify x * 0 to 0, if possible.  */
      if (trueop1 == CONST0_RTX (mode)
          && !side_effects_p (op0))
        return op1;

      /* Simplify x * 1 to x, if possible.  */
      if (trueop1 == CONST1_RTX (mode))
        return op0;
      return 0;

    case SMUL_HIGHPART:
    case UMUL_HIGHPART:
      /* Simplify x * 0 to 0, if possible.  */
      if (trueop1 == CONST0_RTX (mode)
          && !side_effects_p (op0))
        return op1;
      return 0;

    case SS_DIV:
    case US_DIV:
      /* Simplify x / 1 to x, if possible.  */
      if (trueop1 == CONST1_RTX (mode))
        return op0;
      return 0;

    case VEC_SERIES:
      if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
        return gen_vec_duplicate (mode, op0);
      if (valid_for_const_vector_p (mode, op0)
          && valid_for_const_vector_p (mode, op1))
        return gen_const_vec_series (mode, op0, op1);
      return 0;
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);

          /* We can't reason about selections made at runtime.  */
          if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
            return 0;

          if (vec_duplicate_p (trueop0, &elt0))
            return elt0;

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract scalar element from a vector using chain of
             nested VEC_SELECT expressions.  When input operand is a memory
             operand, this operation can be simplified to a simple scalar
             load from an offseted memory address.  */
          int n_elts;
          if (GET_CODE (trueop0) == VEC_SELECT
              && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
                  .is_constant (&n_elts)))
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select element, pointed by nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out the number of elements of each operand.
                     Since the concatenated result has a constant number
                     of elements, the operands must too.  */
                  n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
                  n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select correct operand of VEC_CONCAT
                     and adjust selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (vec_duplicate_p (trueop0, &elt0))
            /* It doesn't matter which elements are selected by trueop1,
               because they are all the same.  */
            return gen_vec_duplicate (mode, elt0);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              unsigned n_elts = XVECLEN (trueop1, 0);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  if (!CONST_INT_P (x))
                    return 0;

                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }
          /* Recognize the identity.  */
          if (GET_MODE (trueop0) == mode)
            {
              bool maybe_ident = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (!CONST_INT_P (j) || INTVAL (j) != i)
                    {
                      maybe_ident = false;
                      break;
                    }
                }
              if (maybe_ident)
                return trueop0;
            }

          /* If we select a low-part subreg, return that.  */
          if (vec_series_lowpart_p (mode, GET_MODE (trueop0), trueop1))
            {
              rtx new_rtx = lowpart_subreg (mode, trueop0,
                                            GET_MODE (trueop0));
              if (new_rtx != NULL_RTX)
                return new_rtx;
            }

          /* If we build {a,b} then permute it, build the result directly.  */
          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 0)) == mode
              && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 1)) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 4 && i1 < 4);
              subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
              subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }

          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_MODE (trueop0) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 2 && i1 < 2);
              subop0 = XEXP (trueop0, i0);
              subop1 = XEXP (trueop0, i1);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }
          /* If we select one half of a vec_concat, return that.  */
          int l0, l1;
          if (GET_CODE (trueop0) == VEC_CONCAT
              && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
                  .is_constant (&l0))
              && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
                  .is_constant (&l1))
              && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
            {
              rtx subop0 = XEXP (trueop0, 0);
              rtx subop1 = XEXP (trueop0, 1);
              machine_mode mode0 = GET_MODE (subop0);
              machine_mode mode1 = GET_MODE (subop1);
              int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
                {
                  bool success = true;
                  for (int i = 1; i < l0; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop0;
                }
              if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
                {
                  bool success = true;
                  for (int i = 1; i < l1; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop1;
                }
            }

          /* Simplify vec_select of a subreg of X to just a vec_select of X
             when X has same component mode as vec_select.  */
          unsigned HOST_WIDE_INT subreg_offset = 0;
          if (GET_CODE (trueop0) == SUBREG
              && GET_MODE_INNER (mode)
                 == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0)))
              && GET_MODE_NUNITS (mode).is_constant (&l1)
              && constant_multiple_p (subreg_memory_offset (trueop0),
                                      GET_MODE_UNIT_BITSIZE (mode),
                                      &subreg_offset))
            {
              poly_uint64 nunits
                = GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0)));
              bool success = true;
              for (int i = 0; i != l1; i++)
                {
                  rtx idx = XVECEXP (trueop1, 0, i);
                  if (!CONST_INT_P (idx)
                      || maybe_ge (UINTVAL (idx) + subreg_offset, nunits))
                    {
                      success = false;
                      break;
                    }
                }

              if (success)
                {
                  rtx par = trueop1;
                  if (subreg_offset)
                    {
                      rtvec vec = rtvec_alloc (l1);
                      for (int i = 0; i < l1; i++)
                        RTVEC_ELT (vec, i)
                          = GEN_INT (INTVAL (XVECEXP (trueop1, 0, i))
                                     + subreg_offset);
                      par = gen_rtx_PARALLEL (VOIDmode, vec);
                    }
                  return gen_rtx_VEC_SELECT (mode, SUBREG_REG (trueop0), par);
                }
            }
          if (XVECLEN (trueop1, 0) == 1
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && GET_CODE (trueop0) == VEC_CONCAT)
            {
              rtx vec = trueop0;
              offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

              /* Try to find the element in the VEC_CONCAT.  */
              while (GET_MODE (vec) != mode
                     && GET_CODE (vec) == VEC_CONCAT)
                {
                  poly_int64 vec_size;

                  if (CONST_INT_P (XEXP (vec, 0)))
                    {
                      /* vec_concat of two const_ints doesn't make sense with
                         respect to modes.  */
                      if (CONST_INT_P (XEXP (vec, 1)))
                        return 0;

                      vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
                                 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
                    }
                  else
                    vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

                  if (known_lt (offset, vec_size))
                    vec = XEXP (vec, 0);
                  else if (known_ge (offset, vec_size))
                    {
                      offset -= vec_size;
                      vec = XEXP (vec, 1);
                    }
                  else
                    break;

                  vec = avoid_constant_pool_reference (vec);
                }

              if (GET_MODE (vec) == mode)
                return vec;
            }

          /* If we select elements in a vec_merge that all come from the same
             operand, select from that operand directly.  */
          if (GET_CODE (op0) == VEC_MERGE)
            {
              rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
              if (CONST_INT_P (trueop02))
                {
                  unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
                  bool all_operand0 = true;
                  bool all_operand1 = true;
                  for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
                        all_operand1 = false;
                      else
                        all_operand0 = false;
                    }
                  if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
                    return simplify_gen_binary (VEC_SELECT, mode,
                                                XEXP (op0, 0), op1);
                  if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
                    return simplify_gen_binary (VEC_SELECT, mode,
                                                XEXP (op0, 1), op1);
                }
            }

          /* If we have two nested selects that are inverses of each
             other, replace them with the source operand.  */
          if (GET_CODE (trueop0) == VEC_SELECT
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            {
              rtx op0_subop1 = XEXP (trueop0, 1);
              gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
              gcc_assert (known_eq (XVECLEN (trueop1, 0),
                                    GET_MODE_NUNITS (mode)));

              /* Apply the outer ordering vector to the inner one.  (The inner
                 ordering vector is expressly permitted to be of a different
                 length than the outer one.)  If the result is { 0, 1, ...,
                 n-1 } then the two VEC_SELECTs cancel.  */
              for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
                {
                  rtx x = XVECEXP (trueop1, 0, i);
                  if (!CONST_INT_P (x))
                    return 0;
                  rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
                  if (!CONST_INT_P (y) || i != INTVAL (y))
                    return 0;
                }
              return XEXP (trueop0, 0);
            }
        }
      return 0;
    case VEC_CONCAT:
      {
        machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                 ? GET_MODE (trueop0)
                                 : GET_MODE_INNER (mode));
        machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                 ? GET_MODE (trueop1)
                                 : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
                              + GET_MODE_SIZE (op1_mode),
                              GET_MODE_SIZE (mode)));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        unsigned int n_elts, in_n_elts;
        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_SCALAR_INT_P (trueop0)
             || CONST_DOUBLE_AS_FLOAT_P (trueop0))
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_SCALAR_INT_P (trueop1)
                || CONST_DOUBLE_AS_FLOAT_P (trueop1))
            && GET_MODE_NUNITS (mode).is_constant (&n_elts)
            && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
          {
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }

        /* Try to merge two VEC_SELECTs from the same vector into a single one.
           Restrict the transformation to avoid generating a VEC_SELECT with a
           mode unrelated to its operand.  */
        if (GET_CODE (trueop0) == VEC_SELECT
            && GET_CODE (trueop1) == VEC_SELECT
            && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
            && GET_MODE_INNER (GET_MODE (XEXP (trueop0, 0)))
               == GET_MODE_INNER (mode))
          {
            rtx par0 = XEXP (trueop0, 1);
            rtx par1 = XEXP (trueop1, 1);
            int len0 = XVECLEN (par0, 0);
            int len1 = XVECLEN (par1, 0);
            rtvec vec = rtvec_alloc (len0 + len1);
            for (int i = 0; i < len0; i++)
              RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
            for (int i = 0; i < len1; i++)
              RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
            return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
                                        gen_rtx_PARALLEL (VOIDmode, vec));
          }
      }
      break;

    default:
      break;
    }

  if (mode == GET_MODE (op0)
      && mode == GET_MODE (op1)
      && vec_duplicate_p (op0, &elt0)
      && vec_duplicate_p (op1, &elt1))
    {
      /* Try applying the operator to ELT and see if that simplifies.
         We can duplicate the result if so.

         The reason we don't use simplify_gen_binary is that it isn't
         necessarily a win to convert things like:

           (plus:V (vec_duplicate:V (reg:S R1))
                   (vec_duplicate:V (reg:S R2)))

         to:

           (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))

         The first might be done entirely in vector registers while the
         second might need a move between register files.  */
      tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                       elt0, elt1);
      if (tem)
        return gen_vec_duplicate (mode, tem);
    }

  return 0;
}
/* Return true if binary operation OP distributes over addition in operand
   OPNO, with the other operand being held constant.  OPNO counts from 1.  */

static bool
distributes_over_addition_p (rtx_code op, int opno)
{
  switch (op)
    {
    case PLUS:
    case MINUS:
    case MULT:
      return true;

    case ASHIFT:
      return opno == 1;

    default:
      return false;
    }
}
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
                                 rtx op0, rtx op1)
{
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      bool step_ok_p;
      if (CONST_VECTOR_STEPPED_P (op0)
          && CONST_VECTOR_STEPPED_P (op1))
        /* We can operate directly on the encoding if:

              a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
            implies
              (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)

           Addition and subtraction are the supported operators
           for which this is true.  */
        step_ok_p = (code == PLUS || code == MINUS);
      else if (CONST_VECTOR_STEPPED_P (op0))
        /* We can operate directly on stepped encodings if:

             a3 - a2 == a2 - a1
           implies:
             (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)

           which is true if (x -> x op c) distributes over addition.  */
        step_ok_p = distributes_over_addition_p (code, 1);
      else
        /* Similarly in reverse.  */
        step_ok_p = distributes_over_addition_p (code, 2);
      rtx_vector_builder builder;
      if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
        return 0;

      unsigned int count = builder.encoded_nelts ();
      for (unsigned int i = 0; i < count; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x || !valid_for_const_vector_p (mode, x))
            return 0;
          builder.quick_push (x);
        }
      return builder.build ();
    }
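  /* E.g. adding the stepped series {0, 1, 2, ...} to the duplicated
     constant {10, 10, 10, ...} only needs the encoded elements and
     yields the stepped series {10, 11, 12, ...}: PLUS preserves the
     "adjacent differences are equal" property the encoding relies on,
     whereas an operation like UMIN would not.  */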
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
          || CONST_FIXED_P (op0)
          || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
          || CONST_DOUBLE_AS_FLOAT_P (op1)
          || CONST_FIXED_P (op1)))
    {
      /* Both inputs have a constant number of elements, so the result
         must too.  */
      unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts + i) = CONST_VECTOR_ELT (op1, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return const_double_from_real_value (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          const REAL_VALUE_TYPE *opr0, *opr1;
          bool inexact;

          opr0 = CONST_DOUBLE_REAL_VALUE (op0);
          opr1 = CONST_DOUBLE_REAL_VALUE (op1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
                  || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
            return 0;

          real_convert (&f0, mode, opr0);
          real_convert (&f1, mode, opr1);

          if (code == DIV
              && real_equal (&f1, &dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && real_equal (&f0, &dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */
          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */
          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return 0;

          return const_double_from_real_value (result, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
    {
      wide_int result;
      wi::overflow_type overflow;
      rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
      rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE but a lot of
         upstream callers expect that this function never fails to
         simplify something and so if you added this to the test
         above the code would die later anyway.  If this assert
         happens, you just need to make the port support wide int.  */
      gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
        {
        case MINUS:
          result = wi::sub (pop0, pop1);
          break;

        case PLUS:
          result = wi::add (pop0, pop1);
          break;

        case MULT:
          result = wi::mul (pop0, pop1);
          break;

        case DIV:
          result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case MOD:
          result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case UDIV:
          result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case UMOD:
          result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case AND:
          result = wi::bit_and (pop0, pop1);
          break;

        case IOR:
          result = wi::bit_or (pop0, pop1);
          break;

        case XOR:
          result = wi::bit_xor (pop0, pop1);
          break;

        case SMIN:
          result = wi::smin (pop0, pop1);
          break;

        case SMAX:
          result = wi::smax (pop0, pop1);
          break;

        case UMIN:
          result = wi::umin (pop0, pop1);
          break;

        case UMAX:
          result = wi::umax (pop0, pop1);
          break;

        case LSHIFTRT:
        case ASHIFTRT:
        case ASHIFT:
        case SS_ASHIFT:
        case US_ASHIFT:
          {
            /* The shift count might be in SImode while int_mode might
               be narrower.  On IA-64 it is even DImode.  If the shift
               count is too large and doesn't fit into int_mode, we'd
               ICE.  So, if int_mode is narrower than word, use
               word_mode for the shift count.  */
            if (GET_MODE (op1) == VOIDmode
                && GET_MODE_PRECISION (int_mode) < BITS_PER_WORD)
              pop1 = rtx_mode_t (op1, word_mode);

            wide_int wop1 = pop1;
            if (SHIFT_COUNT_TRUNCATED)
              wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
            else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
              return NULL_RTX;

            switch (code)
              {
              case LSHIFTRT:
                result = wi::lrshift (pop0, wop1);
                break;

              case ASHIFTRT:
                result = wi::arshift (pop0, wop1);
                break;

              case ASHIFT:
                result = wi::lshift (pop0, wop1);
                break;

              case SS_ASHIFT:
                if (wi::leu_p (wop1, wi::clrsb (pop0)))
                  result = wi::lshift (pop0, wop1);
                else if (wi::neg_p (pop0))
                  result = wi::min_value (int_mode, SIGNED);
                else
                  result = wi::max_value (int_mode, SIGNED);
                break;

              case US_ASHIFT:
                if (wi::eq_p (pop0, 0))
                  result = pop0;
                else if (wi::leu_p (wop1, wi::clz (pop0)))
                  result = wi::lshift (pop0, wop1);
                else
                  result = wi::max_value (int_mode, UNSIGNED);
                break;

              default:
                gcc_unreachable ();
              }
            break;
          }

        case ROTATE:
        case ROTATERT:
          {
            /* The rotate count might be in SImode while int_mode might
               be narrower.  On IA-64 it is even DImode.  If the shift
               count is too large and doesn't fit into int_mode, we'd
               ICE.  So, if int_mode is narrower than word, use
               word_mode for the shift count.  */
            if (GET_MODE (op1) == VOIDmode
                && GET_MODE_PRECISION (int_mode) < BITS_PER_WORD)
              pop1 = rtx_mode_t (op1, word_mode);

            if (wi::neg_p (pop1))
              return NULL_RTX;

            switch (code)
              {
              case ROTATE:
                result = wi::lrotate (pop0, pop1);
                break;

              case ROTATERT:
                result = wi::rrotate (pop0, pop1);
                break;

              default:
                gcc_unreachable ();
              }
            break;
          }

        case SS_PLUS:
          result = wi::add (pop0, pop1, SIGNED, &overflow);
        clamp_signed_saturation:
          if (overflow == wi::OVF_OVERFLOW)
            result = wi::max_value (GET_MODE_PRECISION (int_mode), SIGNED);
          else if (overflow == wi::OVF_UNDERFLOW)
            result = wi::min_value (GET_MODE_PRECISION (int_mode), SIGNED);
          else if (overflow != wi::OVF_NONE)
            return NULL_RTX;
          break;

        case US_PLUS:
          result = wi::add (pop0, pop1, UNSIGNED, &overflow);
        clamp_unsigned_saturation:
          if (overflow != wi::OVF_NONE)
            result = wi::max_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
          break;

        case SS_MINUS:
          result = wi::sub (pop0, pop1, SIGNED, &overflow);
          goto clamp_signed_saturation;

        case US_MINUS:
          result = wi::sub (pop0, pop1, UNSIGNED, &overflow);
          if (overflow != wi::OVF_NONE)
            result = wi::min_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
          break;

        case SS_MULT:
          result = wi::mul (pop0, pop1, SIGNED, &overflow);
          goto clamp_signed_saturation;

        case US_MULT:
          result = wi::mul (pop0, pop1, UNSIGNED, &overflow);
          goto clamp_unsigned_saturation;

        case SMUL_HIGHPART:
          result = wi::mul_high (pop0, pop1, SIGNED);
          break;

        case UMUL_HIGHPART:
          result = wi::mul_high (pop0, pop1, UNSIGNED);
          break;

        default:
          return NULL_RTX;
        }
      return immed_wide_int_const (result, int_mode);
    }
  /* Handle polynomial integers.  */
  if (NUM_POLY_INT_COEFFS > 1
      && is_a <scalar_int_mode> (mode, &int_mode)
      && poly_int_rtx_p (op0)
      && poly_int_rtx_p (op1))
    {
      poly_wide_int result;
      switch (code)
        {
        case PLUS:
          result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
          break;

        case MINUS:
          result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
          break;

        case MULT:
          if (CONST_SCALAR_INT_P (op1))
            result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
          else
            return NULL_RTX;
          break;

        case ASHIFT:
          if (CONST_SCALAR_INT_P (op1))
            {
              wide_int shift
                = rtx_mode_t (op1,
                              GET_MODE (op1) == VOIDmode
                              && GET_MODE_PRECISION (int_mode) < BITS_PER_WORD
                              ? word_mode : mode);
              if (SHIFT_COUNT_TRUNCATED)
                shift = wi::umod_trunc (shift,
                                        GET_MODE_PRECISION (int_mode));
              else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
                return NULL_RTX;
              result = wi::to_poly_wide (op0, mode) << shift;
            }
          else
            return NULL_RTX;
          break;

        case IOR:
          if (!CONST_SCALAR_INT_P (op1)
              || !can_ior_p (wi::to_poly_wide (op0, mode),
                             rtx_mode_t (op1, mode), &result))
            return NULL_RTX;
          break;

        default:
          return NULL_RTX;
        }
      return immed_wide_int_const (result, int_mode);
    }

  return NULL_RTX;
}
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */

rtx
simplify_context::simplify_plus_minus (rtx_code code, machine_mode mode,
                                       rtx op0, rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == ARRAY_SIZE (ops))
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              changed = 1;
              /* If this operand was negated then we will potentially
                 canonicalize the expression.  Similarly if we don't
                 place the operands adjacent we're re-ordering the
                 expression and thus might be performing a
                 canonicalization.  Ignore register re-ordering.
                 ??? It might be better to shuffle the ops array here,
                 but then (plus (plus (A, B), plus (C, D))) wouldn't
                 be seen as non-canonical.  */
              if (this_neg
                  || (i != n_ops - 2
                      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
                canonicalized = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              canonicalized = 1;
              break;

            case CONST:
              if (n_ops != ARRAY_SIZE (ops)
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != ARRAY_SIZE (ops))
                {
                  ops[n_ops].op = CONSTM1_RTX (mode);
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            CASE_CONST_SCALAR_INT:
            case CONST_POLY_INT:
              n_constants++;
              if (this_neg)
                {
                  ops[i].op = neg_poly_int_rtx (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          int cmp;

          j = i - 1;
          cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
          if (cmp <= 0)
            continue;
          /* Just swapping registers doesn't count as canonicalization.  */
          if (cmp != 1)
            canonicalized = 1;

          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j--
                 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
          ops[j + 1] = save;
        }

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      std::swap (lhs, rhs);
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  std::swap (lhs, rhs);

                if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
                    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode, tem_lhs,
                                                     tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                if (tem)
                  {
                    /* Reject "simplifications" that just wrap the two
                       arguments in a CONST.  Failure to do so can result
                       in infinite recursion with simplify_binary_operation
                       when it calls us to simplify CONST operations.
                       Also, if we find such a simplification, don't try
                       any more combinations with this rhs:  We must have
                       something like symbol+offset, ie. one of the
                       trivial CONST expressions we handle later.  */
                    if (GET_CODE (tem) == CONST
                        && GET_CODE (XEXP (tem, 0)) == ncode
                        && XEXP (XEXP (tem, 0), 0) == lhs
                        && XEXP (XEXP (tem, 0), 1) == rhs)
                      break;
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (poly_int_rtx_p (tem) && lneg)
                      tem = neg_poly_int_rtx (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                    canonicalized = 1;
                  }
              }
          }

      if (!changed)
        break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }

  /* If nothing changed, check that rematerialization of rtl instructions
     is still required.  */
  if (!canonicalized)
    {
      /* Perform rematerialization if only all operands are registers and
         all operations are PLUS.  */
      /* ??? Also disallow (non-global, non-frame) fixed registers to work
         around rs6000 and how it uses the CA register.  See PR67145.  */
      for (i = 0; i < n_ops; i++)
        if (ops[i].neg
            || !REG_P (ops[i].op)
            || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
                && fixed_regs[REGNO (ops[i].op)]
                && !global_regs[REGNO (ops[i].op)]
                && ops[i].op != frame_pointer_rtx
                && ops[i].op != arg_pointer_rtx
                && ops[i].op != stack_pointer_rtx))
          return NULL_RTX;
      goto gen_result;
    }

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && poly_int_rtx_p (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_poly_int_rtx (mode, value);
      if (CONST_INT_P (value))
        {
          ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
                                             INTVAL (value));
          n_ops--;
        }
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
 gen_result:
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */

static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_context::simplify_relational_operation (rtx_code code,
                                                 machine_mode mode,
                                                 machine_mode cmp_mode,
                                                 rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    return relational_result (mode, cmp_mode, tem);

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done in, so it is the mode of the operands.  */

rtx
simplify_context::simplify_relational_operation_1 (rtx_code code,
                                                   machine_mode mode,
                                                   machine_mode cmp_mode,
                                                   rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
     transformed into (LTU a -C).  */
  if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
      && CONST_INT_P (XEXP (op0, 1))
      && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational (LTU, mode, cmp_mode,
                                      XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
                                    copy_rtx (XEXP (op0, 0)));
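  /* For example (expository): in unsigned modular arithmetic, a + 4
     wraps around, i.e. (ltu (plus a 4) 4) holds, exactly when
     a >= -4 (unsigned), so the comparison becomes
     (geu a (const_int -4)).  This is the overflow-check idiom the
     first transformation above recognizes.  */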
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
         simplification case between:
            A + B == C  <--->  C - B == A,
         where A, B, and C are all constants with non-simplifiable expressions,
         usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
          && CONSTANT_P (x)
          && rtx_equal_p (c, XEXP (tem, 1)))
        return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  scalar_int_mode int_mode, int_cmp_mode;
  if (code == NE
      && op1 == const0_rtx
      && is_int_mode (mode, &int_mode)
      && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && int_mode != BImode
      && int_cmp_mode != BImode
      && nonzero_bits (op0, int_cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
           : lowpart_subreg (int_mode, op0, int_cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
                                    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));
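  /* E.g. (eq (xor x (const_int 5)) (const_int 3)) becomes
     (eq x (const_int 6)): x ^ 5 == 3 holds exactly when x == 5 ^ 3,
     because XOR is its own inverse.  */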
  /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
     constant folding if x/y is a constant.  */
  if ((code == EQ || code == NE)
      && (op0code == AND || op0code == IOR)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
         (eq/ne (and (not y) x) 0).  */
      if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
          || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
        {
          rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
                                          cmp_mode);
          rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

          return simplify_gen_relational (code, mode, cmp_mode, lhs,
                                          CONST0_RTX (cmp_mode));
        }

      /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
         (eq/ne (and (not x) y) 0).  */
      if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
          || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
        {
          rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
                                          cmp_mode);
          rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

          return simplify_gen_relational (code, mode, cmp_mode, lhs,
                                          CONST0_RTX (cmp_mode));
        }
    }
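  /* For example: (eq (and x y) x) asks whether every bit set in x is
     also set in y, which is equivalent to x & ~y == 0; targets with an
     and-with-complement instruction that sets the flags (a BICS-style
     instruction) can test that form directly.  */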
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_unary (BSWAP, cmp_mode,
                                                        op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (real_equal (d0, d1) ? CMP_EQ :
                                 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
         largest int representable on the target is as good as
         infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
      rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
          cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }

  /* Optimize comparisons with upper and lower bounds.  */
  scalar_int_mode int_mode;
  if (CONST_INT_P (trueop1)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && HWI_COMPUTABLE_MODE_P (int_mode)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies
                = num_sign_bit_copies (trueop0, int_mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }

  /* Optimize integer comparisons with zero.  */
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && trueop1 == const0_rtx
      && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & (HOST_WIDE_INT_1U << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
            return const0_rtx;
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
            return const_true_rtx;
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
6344 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
6345 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
6346 or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
6347 can be simplified to that or NULL_RTX if not.
6348 Assume X is compared against zero with CMP_CODE and the true
6349 arm is TRUE_VAL and the false arm is FALSE_VAL. */
6352 simplify_context::simplify_cond_clz_ctz (rtx x
, rtx_code cmp_code
,
6353 rtx true_val
, rtx false_val
)
6355 if (cmp_code
!= EQ
&& cmp_code
!= NE
)
6358 /* Result on X == 0 and X !=0 respectively. */
6359 rtx on_zero
, on_nonzero
;
6363 on_nonzero
= false_val
;
6367 on_zero
= false_val
;
6368 on_nonzero
= true_val
;
6371 rtx_code op_code
= GET_CODE (on_nonzero
);
6372 if ((op_code
!= CLZ
&& op_code
!= CTZ
)
6373 || !rtx_equal_p (XEXP (on_nonzero
, 0), x
)
6374 || !CONST_INT_P (on_zero
))
6377 HOST_WIDE_INT op_val
;
6378 scalar_int_mode mode ATTRIBUTE_UNUSED
6379 = as_a
<scalar_int_mode
> (GET_MODE (XEXP (on_nonzero
, 0)));
6380 if (((op_code
== CLZ
&& CLZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
))
6381 || (op_code
== CTZ
&& CTZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
)))
6382 && op_val
== INTVAL (on_zero
))
6388 /* Try to simplify X given that it appears within operand OP of a
6389 VEC_MERGE operation whose mask is MASK. X need not use the same
6390 vector mode as the VEC_MERGE, but it must have the same number of
6393 Return the simplified X on success, otherwise return NULL_RTX. */
6396 simplify_context::simplify_merge_mask (rtx x
, rtx mask
, int op
)
6398 gcc_assert (VECTOR_MODE_P (GET_MODE (x
)));
6399 poly_uint64 nunits
= GET_MODE_NUNITS (GET_MODE (x
));
6400 if (GET_CODE (x
) == VEC_MERGE
&& rtx_equal_p (XEXP (x
, 2), mask
))
6402 if (side_effects_p (XEXP (x
, 1 - op
)))
6405 return XEXP (x
, op
);
6408 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
6409 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
))
6411 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
6413 return simplify_gen_unary (GET_CODE (x
), GET_MODE (x
), top0
,
6414 GET_MODE (XEXP (x
, 0)));
6417 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
6418 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
6419 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
6420 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
))
6422 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
6423 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
6426 if (COMPARISON_P (x
))
6427 return simplify_gen_relational (GET_CODE (x
), GET_MODE (x
),
6428 GET_MODE (XEXP (x
, 0)) != VOIDmode
6429 ? GET_MODE (XEXP (x
, 0))
6430 : GET_MODE (XEXP (x
, 1)),
6431 top0
? top0
: XEXP (x
, 0),
6432 top1
? top1
: XEXP (x
, 1));
6434 return simplify_gen_binary (GET_CODE (x
), GET_MODE (x
),
6435 top0
? top0
: XEXP (x
, 0),
6436 top1
? top1
: XEXP (x
, 1));
6439 if (GET_RTX_CLASS (GET_CODE (x
)) == RTX_TERNARY
6440 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
6441 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
6442 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
6443 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
)
6444 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 2)))
6445 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 2))), nunits
))
6447 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
6448 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
6449 rtx top2
= simplify_merge_mask (XEXP (x
, 2), mask
, op
);
6450 if (top0
|| top1
|| top2
)
6451 return simplify_gen_ternary (GET_CODE (x
), GET_MODE (x
),
6452 GET_MODE (XEXP (x
, 0)),
6453 top0
? top0
: XEXP (x
, 0),
6454 top1
? top1
: XEXP (x
, 1),
6455 top2
? top2
: XEXP (x
, 2));
6461 /* Simplify CODE, an operation with result mode MODE and three operands,
6462 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
6463 a constant. Return 0 if no simplifications is possible. */
6466 simplify_context::simplify_ternary_operation (rtx_code code
, machine_mode mode
,
6467 machine_mode op0_mode
,
6468 rtx op0
, rtx op1
, rtx op2
)
6470 bool any_change
= false;
6472 scalar_int_mode int_mode
, int_op0_mode
;
6473 unsigned int n_elts
;
6478 /* Simplify negations around the multiplication. */
6479 /* -a * -b + c => a * b + c. */
6480 if (GET_CODE (op0
) == NEG
)
6482 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
6484 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
6486 else if (GET_CODE (op1
) == NEG
)
6488 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
6490 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
6493 /* Canonicalize the two multiplication operands. */
6494 /* a * -b + c => -b * a + c. */
6495 if (swap_commutative_operands_p (op0
, op1
))
6496 std::swap (op0
, op1
), any_change
= true;
6499 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
6504 if (CONST_INT_P (op0
)
6505 && CONST_INT_P (op1
)
6506 && CONST_INT_P (op2
)
6507 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
6508 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
6509 && HWI_COMPUTABLE_MODE_P (int_mode
))
6511 /* Extracting a bit-field from a constant */
6512 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
6513 HOST_WIDE_INT op1val
= INTVAL (op1
);
6514 HOST_WIDE_INT op2val
= INTVAL (op2
);
6515 if (!BITS_BIG_ENDIAN
)
6517 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
6518 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
6520 /* Not enough information to calculate the bit position. */
6523 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
6525 /* First zero-extend. */
6526 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
6527 /* If desired, propagate sign bit. */
6528 if (code
== SIGN_EXTRACT
6529 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
6531 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
6534 return gen_int_mode (val
, int_mode
);
6539 if (CONST_INT_P (op0
))
6540 return op0
!= const0_rtx
? op1
: op2
;
6542 /* Convert c ? a : a into "a". */
6543 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
6546 /* Convert a != b ? a : b into "a". */
6547 if (GET_CODE (op0
) == NE
6548 && ! side_effects_p (op0
)
6549 && ! HONOR_NANS (mode
)
6550 && ! HONOR_SIGNED_ZEROS (mode
)
6551 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
6552 && rtx_equal_p (XEXP (op0
, 1), op2
))
6553 || (rtx_equal_p (XEXP (op0
, 0), op2
)
6554 && rtx_equal_p (XEXP (op0
, 1), op1
))))
6557 /* Convert a == b ? a : b into "b". */
6558 if (GET_CODE (op0
) == EQ
6559 && ! side_effects_p (op0
)
6560 && ! HONOR_NANS (mode
)
6561 && ! HONOR_SIGNED_ZEROS (mode
)
6562 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
6563 && rtx_equal_p (XEXP (op0
, 1), op2
))
6564 || (rtx_equal_p (XEXP (op0
, 0), op2
)
6565 && rtx_equal_p (XEXP (op0
, 1), op1
))))
6568 /* Convert (!c) != {0,...,0} ? a : b into
6569 c != {0,...,0} ? b : a for vector modes. */
6570 if (VECTOR_MODE_P (GET_MODE (op1
))
6571 && GET_CODE (op0
) == NE
6572 && GET_CODE (XEXP (op0
, 0)) == NOT
6573 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
6575 rtx cv
= XEXP (op0
, 1);
6578 if (!CONST_VECTOR_NUNITS (cv
).is_constant (&nunits
))
6581 for (int i
= 0; i
< nunits
; ++i
)
6582 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
6589 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
6590 XEXP (XEXP (op0
, 0), 0),
6592 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
6597 /* Convert x == 0 ? N : clz (x) into clz (x) when
6598 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6599 Similarly for ctz (x). */
6600 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
6601 && XEXP (op0
, 1) == const0_rtx
)
6604 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
6610 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
6612 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
6613 ? GET_MODE (XEXP (op0
, 1))
6614 : GET_MODE (XEXP (op0
, 0)));
6617 /* Look for happy constants in op1 and op2. */
6618 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
6620 HOST_WIDE_INT t
= INTVAL (op1
);
6621 HOST_WIDE_INT f
= INTVAL (op2
);
6623 if (t
== STORE_FLAG_VALUE
&& f
== 0)
6624 code
= GET_CODE (op0
);
6625 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
6628 tmp
= reversed_comparison_code (op0
, NULL
);
6636 return simplify_gen_relational (code
, mode
, cmp_mode
,
6637 XEXP (op0
, 0), XEXP (op0
, 1));
6640 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
6641 cmp_mode
, XEXP (op0
, 0),
6644 /* See if any simplifications were possible. */
6647 if (CONST_INT_P (temp
))
6648 return temp
== const0_rtx
? op2
: op1
;
6650 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
6656 gcc_assert (GET_MODE (op0
) == mode
);
6657 gcc_assert (GET_MODE (op1
) == mode
);
6658 gcc_assert (VECTOR_MODE_P (mode
));
6659 trueop2
= avoid_constant_pool_reference (op2
);
6660 if (CONST_INT_P (trueop2
)
6661 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
6663 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
6664 unsigned HOST_WIDE_INT mask
;
6665 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
6668 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
6670 if (!(sel
& mask
) && !side_effects_p (op0
))
6672 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
6675 rtx trueop0
= avoid_constant_pool_reference (op0
);
6676 rtx trueop1
= avoid_constant_pool_reference (op1
);
6677 if (GET_CODE (trueop0
) == CONST_VECTOR
6678 && GET_CODE (trueop1
) == CONST_VECTOR
)
6680 rtvec v
= rtvec_alloc (n_elts
);
6683 for (i
= 0; i
< n_elts
; i
++)
6684 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
6685 ? CONST_VECTOR_ELT (trueop0
, i
)
6686 : CONST_VECTOR_ELT (trueop1
, i
));
6687 return gen_rtx_CONST_VECTOR (mode
, v
);
6690 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6691 if no element from a appears in the result. */
6692 if (GET_CODE (op0
) == VEC_MERGE
)
6694 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
6695 if (CONST_INT_P (tem
))
6697 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
6698 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
6699 return simplify_gen_ternary (code
, mode
, mode
,
6700 XEXP (op0
, 1), op1
, op2
);
6701 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
6702 return simplify_gen_ternary (code
, mode
, mode
,
6703 XEXP (op0
, 0), op1
, op2
);
6706 if (GET_CODE (op1
) == VEC_MERGE
)
6708 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
6709 if (CONST_INT_P (tem
))
6711 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
6712 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
6713 return simplify_gen_ternary (code
, mode
, mode
,
6714 op0
, XEXP (op1
, 1), op2
);
6715 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
6716 return simplify_gen_ternary (code
, mode
, mode
,
6717 op0
, XEXP (op1
, 0), op2
);
6721 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6723 if (GET_CODE (op0
) == VEC_DUPLICATE
6724 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
6725 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
6726 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0
, 0))), 1))
6728 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
6729 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
6731 if (XEXP (XEXP (op0
, 0), 0) == op1
6732 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
6736 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6738 with (vec_concat (X) (B)) if N == 1 or
6739 (vec_concat (A) (X)) if N == 2. */
6740 if (GET_CODE (op0
) == VEC_DUPLICATE
6741 && GET_CODE (op1
) == CONST_VECTOR
6742 && known_eq (CONST_VECTOR_NUNITS (op1
), 2)
6743 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6744 && IN_RANGE (sel
, 1, 2))
6746 rtx newop0
= XEXP (op0
, 0);
6747 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
6749 std::swap (newop0
, newop1
);
6750 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6752 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6753 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6754 Only applies for vectors of two elements. */
6755 if (GET_CODE (op0
) == VEC_DUPLICATE
6756 && GET_CODE (op1
) == VEC_CONCAT
6757 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6758 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6759 && IN_RANGE (sel
, 1, 2))
6761 rtx newop0
= XEXP (op0
, 0);
6762 rtx newop1
= XEXP (op1
, 2 - sel
);
6763 rtx otherop
= XEXP (op1
, sel
- 1);
6765 std::swap (newop0
, newop1
);
6766 /* Don't want to throw away the other part of the vec_concat if
6767 it has side-effects. */
6768 if (!side_effects_p (otherop
))
6769 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6774 (vec_merge:outer (vec_duplicate:outer x:inner)
6775 (subreg:outer y:inner 0)
6778 with (vec_concat:outer x:inner y:inner) if N == 1,
6779 or (vec_concat:outer y:inner x:inner) if N == 2.
6781 Implicitly, this means we have a paradoxical subreg, but such
6782 a check is cheap, so make it anyway.
6784 Only applies for vectors of two elements. */
6785 if (GET_CODE (op0
) == VEC_DUPLICATE
6786 && GET_CODE (op1
) == SUBREG
6787 && GET_MODE (op1
) == GET_MODE (op0
)
6788 && GET_MODE (SUBREG_REG (op1
)) == GET_MODE (XEXP (op0
, 0))
6789 && paradoxical_subreg_p (op1
)
6790 && subreg_lowpart_p (op1
)
6791 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6792 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6793 && IN_RANGE (sel
, 1, 2))
6795 rtx newop0
= XEXP (op0
, 0);
6796 rtx newop1
= SUBREG_REG (op1
);
6798 std::swap (newop0
, newop1
);
6799 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6802 /* Same as above but with switched operands:
6803 Replace (vec_merge:outer (subreg:outer x:inner 0)
6804 (vec_duplicate:outer y:inner)
6807 with (vec_concat:outer x:inner y:inner) if N == 1,
6808 or (vec_concat:outer y:inner x:inner) if N == 2. */
6809 if (GET_CODE (op1
) == VEC_DUPLICATE
6810 && GET_CODE (op0
) == SUBREG
6811 && GET_MODE (op0
) == GET_MODE (op1
)
6812 && GET_MODE (SUBREG_REG (op0
)) == GET_MODE (XEXP (op1
, 0))
6813 && paradoxical_subreg_p (op0
)
6814 && subreg_lowpart_p (op0
)
6815 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6816 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6817 && IN_RANGE (sel
, 1, 2))
6819 rtx newop0
= SUBREG_REG (op0
);
6820 rtx newop1
= XEXP (op1
, 0);
6822 std::swap (newop0
, newop1
);
6823 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6826 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6828 with (vec_concat x y) or (vec_concat y x) depending on value
6830 if (GET_CODE (op0
) == VEC_DUPLICATE
6831 && GET_CODE (op1
) == VEC_DUPLICATE
6832 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6833 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6834 && IN_RANGE (sel
, 1, 2))
6836 rtx newop0
= XEXP (op0
, 0);
6837 rtx newop1
= XEXP (op1
, 0);
6839 std::swap (newop0
, newop1
);
6841 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6845 if (rtx_equal_p (op0
, op1
)
6846 && !side_effects_p (op2
) && !side_effects_p (op1
))
6849 if (!side_effects_p (op2
))
6852 = may_trap_p (op0
) ? NULL_RTX
: simplify_merge_mask (op0
, op2
, 0);
6854 = may_trap_p (op1
) ? NULL_RTX
: simplify_merge_mask (op1
, op2
, 1);
6856 return simplify_gen_ternary (code
, mode
, mode
,
6858 top1
? top1
: op1
, op2
);
6870 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6871 starting at byte FIRST_BYTE. Return true on success and add the
6872 bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6873 that the bytes follow target memory order. Leave BYTES unmodified
6876 MODE is the mode of X. The caller must reserve NUM_BYTES bytes in
6877 BYTES before calling this function. */
6880 native_encode_rtx (machine_mode mode
, rtx x
, vec
<target_unit
> &bytes
,
6881 unsigned int first_byte
, unsigned int num_bytes
)
6883 /* Check the mode is sensible. */
6884 gcc_assert (GET_MODE (x
) == VOIDmode
6885 ? is_a
<scalar_int_mode
> (mode
)
6886 : mode
== GET_MODE (x
));
6888 if (GET_CODE (x
) == CONST_VECTOR
)
6890 /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6891 is necessary. The only complication is that MODE_VECTOR_BOOL
6892 vectors can have several elements per byte. */
6893 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
6894 GET_MODE_NUNITS (mode
));
6895 unsigned int elt
= first_byte
* BITS_PER_UNIT
/ elt_bits
;
6896 if (elt_bits
< BITS_PER_UNIT
)
6898 /* This is the only case in which elements can be smaller than
6900 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_BOOL
);
6901 auto mask
= GET_MODE_MASK (GET_MODE_INNER (mode
));
6902 for (unsigned int i
= 0; i
< num_bytes
; ++i
)
6904 target_unit value
= 0;
6905 for (unsigned int j
= 0; j
< BITS_PER_UNIT
; j
+= elt_bits
)
6907 value
|= (INTVAL (CONST_VECTOR_ELT (x
, elt
)) & mask
) << j
;
6910 bytes
.quick_push (value
);
6915 unsigned int start
= bytes
.length ();
6916 unsigned int elt_bytes
= GET_MODE_UNIT_SIZE (mode
);
6917 /* Make FIRST_BYTE relative to ELT. */
6918 first_byte
%= elt_bytes
;
6919 while (num_bytes
> 0)
6921 /* Work out how many bytes we want from element ELT. */
6922 unsigned int chunk_bytes
= MIN (num_bytes
, elt_bytes
- first_byte
);
6923 if (!native_encode_rtx (GET_MODE_INNER (mode
),
6924 CONST_VECTOR_ELT (x
, elt
), bytes
,
6925 first_byte
, chunk_bytes
))
6927 bytes
.truncate (start
);
6932 num_bytes
-= chunk_bytes
;
6937 /* All subsequent cases are limited to scalars. */
6939 if (!is_a
<scalar_mode
> (mode
, &smode
))
6942 /* Make sure that the region is in range. */
6943 unsigned int end_byte
= first_byte
+ num_bytes
;
6944 unsigned int mode_bytes
= GET_MODE_SIZE (smode
);
6945 gcc_assert (end_byte
<= mode_bytes
);
6947 if (CONST_SCALAR_INT_P (x
))
6949 /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6950 and WORDS_BIG_ENDIAN. Use the subreg machinery to get the lsb
6951 position of each byte. */
6952 rtx_mode_t
value (x
, smode
);
6953 wide_int_ref
value_wi (value
);
6954 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6956 /* Always constant because the inputs are. */
6958 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
6959 /* Operate directly on the encoding rather than using
6960 wi::extract_uhwi, so that we preserve the sign or zero
6961 extension for modes that are not a whole number of bits in
6962 size. (Zero extension is only used for the combination of
6963 innermode == BImode && STORE_FLAG_VALUE == 1). */
6964 unsigned int elt
= lsb
/ HOST_BITS_PER_WIDE_INT
;
6965 unsigned int shift
= lsb
% HOST_BITS_PER_WIDE_INT
;
6966 unsigned HOST_WIDE_INT uhwi
= value_wi
.elt (elt
);
6967 bytes
.quick_push (uhwi
>> shift
);
6972 if (CONST_DOUBLE_P (x
))
6974 /* real_to_target produces an array of integers in target memory order.
6975 All integers before the last one have 32 bits; the last one may
6976 have 32 bits or fewer, depending on whether the mode bitsize
6977 is divisible by 32. Each of these integers is then laid out
6978 in target memory as any other integer would be. */
6979 long el32
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
6980 real_to_target (el32
, CONST_DOUBLE_REAL_VALUE (x
), smode
);
6982 /* The (maximum) number of target bytes per element of el32. */
6983 unsigned int bytes_per_el32
= 32 / BITS_PER_UNIT
;
6984 gcc_assert (bytes_per_el32
!= 0);
6986 /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6988 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6990 unsigned int index
= byte
/ bytes_per_el32
;
6991 unsigned int subbyte
= byte
% bytes_per_el32
;
6992 unsigned int int_bytes
= MIN (bytes_per_el32
,
6993 mode_bytes
- index
* bytes_per_el32
);
6994 /* Always constant because the inputs are. */
6996 = subreg_size_lsb (1, int_bytes
, subbyte
).to_constant ();
6997 bytes
.quick_push ((unsigned long) el32
[index
] >> lsb
);
7002 if (GET_CODE (x
) == CONST_FIXED
)
7004 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
7006 /* Always constant because the inputs are. */
7008 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
7009 unsigned HOST_WIDE_INT piece
= CONST_FIXED_VALUE_LOW (x
);
7010 if (lsb
>= HOST_BITS_PER_WIDE_INT
)
7012 lsb
-= HOST_BITS_PER_WIDE_INT
;
7013 piece
= CONST_FIXED_VALUE_HIGH (x
);
7015 bytes
.quick_push (piece
>> lsb
);
7023 /* Read a vector of mode MODE from the target memory image given by BYTES,
7024 starting at byte FIRST_BYTE. The vector is known to be encodable using
7025 NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
7026 and BYTES is known to have enough bytes to supply NPATTERNS *
7027 NELTS_PER_PATTERN vector elements. Each element of BYTES contains
7028 BITS_PER_UNIT bits and the bytes are in target memory order.
7030 Return the vector on success, otherwise return NULL_RTX. */
7033 native_decode_vector_rtx (machine_mode mode
, const vec
<target_unit
> &bytes
,
7034 unsigned int first_byte
, unsigned int npatterns
,
7035 unsigned int nelts_per_pattern
)
7037 rtx_vector_builder
builder (mode
, npatterns
, nelts_per_pattern
);
7039 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
7040 GET_MODE_NUNITS (mode
));
7041 if (elt_bits
< BITS_PER_UNIT
)
7043 /* This is the only case in which elements can be smaller than a byte.
7044 Element 0 is always in the lsb of the containing byte. */
7045 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_BOOL
);
7046 for (unsigned int i
= 0; i
< builder
.encoded_nelts (); ++i
)
7048 unsigned int bit_index
= first_byte
* BITS_PER_UNIT
+ i
* elt_bits
;
7049 unsigned int byte_index
= bit_index
/ BITS_PER_UNIT
;
7050 unsigned int lsb
= bit_index
% BITS_PER_UNIT
;
7051 unsigned int value
= bytes
[byte_index
] >> lsb
;
7052 builder
.quick_push (gen_int_mode (value
, GET_MODE_INNER (mode
)));
7057 for (unsigned int i
= 0; i
< builder
.encoded_nelts (); ++i
)
7059 rtx x
= native_decode_rtx (GET_MODE_INNER (mode
), bytes
, first_byte
);
7062 builder
.quick_push (x
);
7063 first_byte
+= elt_bits
/ BITS_PER_UNIT
;
7066 return builder
.build ();
7069 /* Read an rtx of mode MODE from the target memory image given by BYTES,
7070 starting at byte FIRST_BYTE. Each element of BYTES contains BITS_PER_UNIT
7071 bits and the bytes are in target memory order. The image has enough
7072 values to specify all bytes of MODE.
7074 Return the rtx on success, otherwise return NULL_RTX. */
7077 native_decode_rtx (machine_mode mode
, const vec
<target_unit
> &bytes
,
7078 unsigned int first_byte
)
7080 if (VECTOR_MODE_P (mode
))
7082 /* If we know at compile time how many elements there are,
7083 pull each element directly from BYTES. */
7085 if (GET_MODE_NUNITS (mode
).is_constant (&nelts
))
7086 return native_decode_vector_rtx (mode
, bytes
, first_byte
, nelts
, 1);
7090 scalar_int_mode imode
;
7091 if (is_a
<scalar_int_mode
> (mode
, &imode
)
7092 && GET_MODE_PRECISION (imode
) <= MAX_BITSIZE_MODE_ANY_INT
)
7094 /* Pull the bytes msb first, so that we can use simple
7095 shift-and-insert wide_int operations. */
7096 unsigned int size
= GET_MODE_SIZE (imode
);
7097 wide_int
result (wi::zero (GET_MODE_PRECISION (imode
)));
7098 for (unsigned int i
= 0; i
< size
; ++i
)
7100 unsigned int lsb
= (size
- i
- 1) * BITS_PER_UNIT
;
7101 /* Always constant because the inputs are. */
7102 unsigned int subbyte
7103 = subreg_size_offset_from_lsb (1, size
, lsb
).to_constant ();
7104 result
<<= BITS_PER_UNIT
;
7105 result
|= bytes
[first_byte
+ subbyte
];
7107 return immed_wide_int_const (result
, imode
);
7110 scalar_float_mode fmode
;
7111 if (is_a
<scalar_float_mode
> (mode
, &fmode
))
7113 /* We need to build an array of integers in target memory order.
7114 All integers before the last one have 32 bits; the last one may
7115 have 32 bits or fewer, depending on whether the mode bitsize
7116 is divisible by 32. */
7117 long el32
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
7118 unsigned int num_el32
= CEIL (GET_MODE_BITSIZE (fmode
), 32);
7119 memset (el32
, 0, num_el32
* sizeof (long));
7121 /* The (maximum) number of target bytes per element of el32. */
7122 unsigned int bytes_per_el32
= 32 / BITS_PER_UNIT
;
7123 gcc_assert (bytes_per_el32
!= 0);
7125 unsigned int mode_bytes
= GET_MODE_SIZE (fmode
);
7126 for (unsigned int byte
= 0; byte
< mode_bytes
; ++byte
)
7128 unsigned int index
= byte
/ bytes_per_el32
;
7129 unsigned int subbyte
= byte
% bytes_per_el32
;
7130 unsigned int int_bytes
= MIN (bytes_per_el32
,
7131 mode_bytes
- index
* bytes_per_el32
);
7132 /* Always constant because the inputs are. */
7134 = subreg_size_lsb (1, int_bytes
, subbyte
).to_constant ();
7135 el32
[index
] |= (unsigned long) bytes
[first_byte
+ byte
] << lsb
;
7138 real_from_target (&r
, el32
, fmode
);
7139 return const_double_from_real_value (r
, fmode
);
7142 if (ALL_SCALAR_FIXED_POINT_MODE_P (mode
))
7144 scalar_mode smode
= as_a
<scalar_mode
> (mode
);
7150 unsigned int mode_bytes
= GET_MODE_SIZE (smode
);
7151 for (unsigned int byte
= 0; byte
< mode_bytes
; ++byte
)
7153 /* Always constant because the inputs are. */
7155 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
7156 unsigned HOST_WIDE_INT unit
= bytes
[first_byte
+ byte
];
7157 if (lsb
>= HOST_BITS_PER_WIDE_INT
)
7158 f
.data
.high
|= unit
<< (lsb
- HOST_BITS_PER_WIDE_INT
);
7160 f
.data
.low
|= unit
<< lsb
;
7162 return CONST_FIXED_FROM_FIXED_VALUE (f
, mode
);
7168 /* Simplify a byte offset BYTE into CONST_VECTOR X. The main purpose
7169 is to convert a runtime BYTE value into a constant one. */
7172 simplify_const_vector_byte_offset (rtx x
, poly_uint64 byte
)
7174 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7175 machine_mode mode
= GET_MODE (x
);
7176 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
7177 GET_MODE_NUNITS (mode
));
7178 /* The number of bits needed to encode one element from each pattern. */
7179 unsigned int sequence_bits
= CONST_VECTOR_NPATTERNS (x
) * elt_bits
;
7181 /* Identify the start point in terms of a sequence number and a byte offset
7182 within that sequence. */
7183 poly_uint64 first_sequence
;
7184 unsigned HOST_WIDE_INT subbit
;
7185 if (can_div_trunc_p (byte
* BITS_PER_UNIT
, sequence_bits
,
7186 &first_sequence
, &subbit
))
7188 unsigned int nelts_per_pattern
= CONST_VECTOR_NELTS_PER_PATTERN (x
);
7189 if (nelts_per_pattern
== 1)
7190 /* This is a duplicated vector, so the value of FIRST_SEQUENCE
7192 byte
= subbit
/ BITS_PER_UNIT
;
7193 else if (nelts_per_pattern
== 2 && known_gt (first_sequence
, 0U))
7195 /* The subreg drops the first element from each pattern and
7196 only uses the second element. Find the first sequence
7197 that starts on a byte boundary. */
7198 subbit
+= least_common_multiple (sequence_bits
, BITS_PER_UNIT
);
7199 byte
= subbit
/ BITS_PER_UNIT
;
7205 /* Subroutine of simplify_subreg in which:
7207 - X is known to be a CONST_VECTOR
7208 - OUTERMODE is known to be a vector mode
7210 Try to handle the subreg by operating on the CONST_VECTOR encoding
7211 rather than on each individual element of the CONST_VECTOR.
7213 Return the simplified subreg on success, otherwise return NULL_RTX. */
7216 simplify_const_vector_subreg (machine_mode outermode
, rtx x
,
7217 machine_mode innermode
, unsigned int first_byte
)
7219 /* Paradoxical subregs of vectors have dubious semantics. */
7220 if (paradoxical_subreg_p (outermode
, innermode
))
7223 /* We can only preserve the semantics of a stepped pattern if the new
7224 vector element is the same as the original one. */
7225 if (CONST_VECTOR_STEPPED_P (x
)
7226 && GET_MODE_INNER (outermode
) != GET_MODE_INNER (innermode
))
7229 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7230 unsigned int x_elt_bits
7231 = vector_element_size (GET_MODE_BITSIZE (innermode
),
7232 GET_MODE_NUNITS (innermode
));
7233 unsigned int out_elt_bits
7234 = vector_element_size (GET_MODE_BITSIZE (outermode
),
7235 GET_MODE_NUNITS (outermode
));
7237 /* The number of bits needed to encode one element from every pattern
7238 of the original vector. */
7239 unsigned int x_sequence_bits
= CONST_VECTOR_NPATTERNS (x
) * x_elt_bits
;
7241 /* The number of bits needed to encode one element from every pattern
7243 unsigned int out_sequence_bits
7244 = least_common_multiple (x_sequence_bits
, out_elt_bits
);
7246 /* Work out the number of interleaved patterns in the output vector
7247 and the number of encoded elements per pattern. */
7248 unsigned int out_npatterns
= out_sequence_bits
/ out_elt_bits
;
7249 unsigned int nelts_per_pattern
= CONST_VECTOR_NELTS_PER_PATTERN (x
);
7251 /* The encoding scheme requires the number of elements to be a multiple
7252 of the number of patterns, so that each pattern appears at least once
7253 and so that the same number of elements appear from each pattern. */
7254 bool ok_p
= multiple_p (GET_MODE_NUNITS (outermode
), out_npatterns
);
7255 unsigned int const_nunits
;
7256 if (GET_MODE_NUNITS (outermode
).is_constant (&const_nunits
)
7257 && (!ok_p
|| out_npatterns
* nelts_per_pattern
> const_nunits
))
7259 /* Either the encoding is invalid, or applying it would give us
7260 more elements than we need. Just encode each element directly. */
7261 out_npatterns
= const_nunits
;
7262 nelts_per_pattern
= 1;
7267 /* Get enough bytes of X to form the new encoding. */
7268 unsigned int buffer_bits
= out_npatterns
* nelts_per_pattern
* out_elt_bits
;
7269 unsigned int buffer_bytes
= CEIL (buffer_bits
, BITS_PER_UNIT
);
7270 auto_vec
<target_unit
, 128> buffer (buffer_bytes
);
7271 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, buffer_bytes
))
7274 /* Reencode the bytes as OUTERMODE. */
7275 return native_decode_vector_rtx (outermode
, buffer
, 0, out_npatterns
,
7279 /* Try to simplify a subreg of a constant by encoding the subreg region
7280 as a sequence of target bytes and reading them back in the new mode.
7281 Return the new value on success, otherwise return null.
7283 The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
7284 and byte offset FIRST_BYTE. */
7287 simplify_immed_subreg (fixed_size_mode outermode
, rtx x
,
7288 machine_mode innermode
, unsigned int first_byte
)
7290 unsigned int buffer_bytes
= GET_MODE_SIZE (outermode
);
7291 auto_vec
<target_unit
, 128> buffer (buffer_bytes
);
7293 /* Some ports misuse CCmode. */
7294 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (x
))
7297 /* Paradoxical subregs read undefined values for bytes outside of the
7298 inner value. However, we have traditionally always sign-extended
7299 integer constants and zero-extended others. */
7300 unsigned int inner_bytes
= buffer_bytes
;
7301 if (paradoxical_subreg_p (outermode
, innermode
))
7303 if (!GET_MODE_SIZE (innermode
).is_constant (&inner_bytes
))
7306 target_unit filler
= 0;
7307 if (CONST_SCALAR_INT_P (x
) && wi::neg_p (rtx_mode_t (x
, innermode
)))
7310 /* Add any leading bytes due to big-endian layout. The number of
7311 bytes must be constant because both modes have constant size. */
7312 unsigned int leading_bytes
7313 = -byte_lowpart_offset (outermode
, innermode
).to_constant ();
7314 for (unsigned int i
= 0; i
< leading_bytes
; ++i
)
7315 buffer
.quick_push (filler
);
7317 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, inner_bytes
))
7320 /* Add any trailing bytes due to little-endian layout. */
7321 while (buffer
.length () < buffer_bytes
)
7322 buffer
.quick_push (filler
);
7324 else if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, inner_bytes
))
7326 rtx ret
= native_decode_rtx (outermode
, buffer
, 0);
7327 if (ret
&& FLOAT_MODE_P (outermode
))
7329 auto_vec
<target_unit
, 128> buffer2 (buffer_bytes
);
7330 if (!native_encode_rtx (outermode
, ret
, buffer2
, 0, buffer_bytes
))
7332 for (unsigned int i
= 0; i
< buffer_bytes
; ++i
)
7333 if (buffer
[i
] != buffer2
[i
])
7339 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
7340 Return 0 if no simplifications are possible. */
7342 simplify_context::simplify_subreg (machine_mode outermode
, rtx op
,
7343 machine_mode innermode
, poly_uint64 byte
)
7345 /* Little bit of sanity checking. */
7346 gcc_assert (innermode
!= VOIDmode
);
7347 gcc_assert (outermode
!= VOIDmode
);
7348 gcc_assert (innermode
!= BLKmode
);
7349 gcc_assert (outermode
!= BLKmode
);
7351 gcc_assert (GET_MODE (op
) == innermode
7352 || GET_MODE (op
) == VOIDmode
);
7354 poly_uint64 outersize
= GET_MODE_SIZE (outermode
);
7355 if (!multiple_p (byte
, outersize
))
7358 poly_uint64 innersize
= GET_MODE_SIZE (innermode
);
7359 if (maybe_ge (byte
, innersize
))
7362 if (outermode
== innermode
&& known_eq (byte
, 0U))
7365 if (GET_CODE (op
) == CONST_VECTOR
)
7366 byte
= simplify_const_vector_byte_offset (op
, byte
);
7368 if (multiple_p (byte
, GET_MODE_UNIT_SIZE (innermode
)))
7372 if (VECTOR_MODE_P (outermode
)
7373 && GET_MODE_INNER (outermode
) == GET_MODE_INNER (innermode
)
7374 && vec_duplicate_p (op
, &elt
))
7375 return gen_vec_duplicate (outermode
, elt
);
7377 if (outermode
== GET_MODE_INNER (innermode
)
7378 && vec_duplicate_p (op
, &elt
))
7382 if (CONST_SCALAR_INT_P (op
)
7383 || CONST_DOUBLE_AS_FLOAT_P (op
)
7384 || CONST_FIXED_P (op
)
7385 || GET_CODE (op
) == CONST_VECTOR
)
7387 unsigned HOST_WIDE_INT cbyte
;
7388 if (byte
.is_constant (&cbyte
))
7390 if (GET_CODE (op
) == CONST_VECTOR
&& VECTOR_MODE_P (outermode
))
7392 rtx tmp
= simplify_const_vector_subreg (outermode
, op
,
7398 fixed_size_mode fs_outermode
;
7399 if (is_a
<fixed_size_mode
> (outermode
, &fs_outermode
))
7400 return simplify_immed_subreg (fs_outermode
, op
, innermode
, cbyte
);
7404 /* Changing mode twice with SUBREG => just change it once,
7405 or not at all if changing back op starting mode. */
7406 if (GET_CODE (op
) == SUBREG
)
7408 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
7409 poly_uint64 innermostsize
= GET_MODE_SIZE (innermostmode
);
7412 if (outermode
== innermostmode
7413 && known_eq (byte
, 0U)
7414 && known_eq (SUBREG_BYTE (op
), 0))
7415 return SUBREG_REG (op
);
7417 /* Work out the memory offset of the final OUTERMODE value relative
7418 to the inner value of OP. */
7419 poly_int64 mem_offset
= subreg_memory_offset (outermode
,
7421 poly_int64 op_mem_offset
= subreg_memory_offset (op
);
7422 poly_int64 final_offset
= mem_offset
+ op_mem_offset
;
7424 /* See whether resulting subreg will be paradoxical. */
7425 if (!paradoxical_subreg_p (outermode
, innermostmode
))
7427 /* Bail out in case resulting subreg would be incorrect. */
7428 if (maybe_lt (final_offset
, 0)
7429 || maybe_ge (poly_uint64 (final_offset
), innermostsize
)
7430 || !multiple_p (final_offset
, outersize
))
7435 poly_int64 required_offset
= subreg_memory_offset (outermode
,
7437 if (maybe_ne (final_offset
, required_offset
))
7439 /* Paradoxical subregs always have byte offset 0. */
7443 /* Recurse for further possible simplifications. */
7444 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
7448 if (validate_subreg (outermode
, innermostmode
,
7449 SUBREG_REG (op
), final_offset
))
7451 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
7452 if (SUBREG_PROMOTED_VAR_P (op
)
7453 && SUBREG_PROMOTED_SIGN (op
) >= 0
7454 && GET_MODE_CLASS (outermode
) == MODE_INT
7455 && known_ge (outersize
, innersize
)
7456 && known_le (outersize
, innermostsize
)
7457 && subreg_lowpart_p (newx
))
7459 SUBREG_PROMOTED_VAR_P (newx
) = 1;
7460 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
7467 /* SUBREG of a hard register => just change the register number
7468 and/or mode. If the hard register is not valid in that mode,
7469 suppress this simplification. If the hard register is the stack,
7470 frame, or argument pointer, leave this as a SUBREG. */
7472 if (REG_P (op
) && HARD_REGISTER_P (op
))
7474 unsigned int regno
, final_regno
;
7477 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
7478 if (HARD_REGISTER_NUM_P (final_regno
))
7480 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
,
7481 subreg_memory_offset (outermode
,
7484 /* Propagate original regno. We don't have any way to specify
7485 the offset inside original regno, so do so only for lowpart.
7486 The information is used only by alias analysis that cannot
7487 grog partial register anyway. */
7489 if (known_eq (subreg_lowpart_offset (outermode
, innermode
), byte
))
7490 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
7495 /* If we have a SUBREG of a register that we are replacing and we are
7496 replacing it with a MEM, make a new MEM and try replacing the
7497 SUBREG with it. Don't do this if the MEM has a mode-dependent address
7498 or if we would be widening it. */
7501 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
7502 /* Allow splitting of volatile memory references in case we don't
7503 have instruction to move the whole thing. */
7504 && (! MEM_VOLATILE_P (op
)
7505 || ! have_insn_for (SET
, innermode
))
7506 && !(STRICT_ALIGNMENT
&& MEM_ALIGN (op
) < GET_MODE_ALIGNMENT (outermode
))
7507 && known_le (outersize
, innersize
))
7508 return adjust_address_nv (op
, outermode
, byte
);
7510 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
7512 if (GET_CODE (op
) == CONCAT
7513 || GET_CODE (op
) == VEC_CONCAT
)
7515 poly_uint64 final_offset
;
7518 machine_mode part_mode
= GET_MODE (XEXP (op
, 0));
7519 if (part_mode
== VOIDmode
)
7520 part_mode
= GET_MODE_INNER (GET_MODE (op
));
7521 poly_uint64 part_size
= GET_MODE_SIZE (part_mode
);
7522 if (known_lt (byte
, part_size
))
7524 part
= XEXP (op
, 0);
7525 final_offset
= byte
;
7527 else if (known_ge (byte
, part_size
))
7529 part
= XEXP (op
, 1);
7530 final_offset
= byte
- part_size
;
7535 if (maybe_gt (final_offset
+ outersize
, part_size
))
7538 part_mode
= GET_MODE (part
);
7539 if (part_mode
== VOIDmode
)
7540 part_mode
= GET_MODE_INNER (GET_MODE (op
));
7541 res
= simplify_subreg (outermode
, part
, part_mode
, final_offset
);
7544 if (validate_subreg (outermode
, part_mode
, part
, final_offset
))
7545 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
7550 (subreg (vec_merge (X)
7552 (const_int ((1 << N) | M)))
7553 (N * sizeof (outermode)))
7555 (subreg (X) (N * sizeof (outermode)))
7558 if (constant_multiple_p (byte
, GET_MODE_SIZE (outermode
), &idx
)
7559 && idx
< HOST_BITS_PER_WIDE_INT
7560 && GET_CODE (op
) == VEC_MERGE
7561 && GET_MODE_INNER (innermode
) == outermode
7562 && CONST_INT_P (XEXP (op
, 2))
7563 && (UINTVAL (XEXP (op
, 2)) & (HOST_WIDE_INT_1U
<< idx
)) != 0)
7564 return simplify_gen_subreg (outermode
, XEXP (op
, 0), innermode
, byte
);
7566 /* A SUBREG resulting from a zero extension may fold to zero if
7567 it extracts higher bits that the ZERO_EXTEND's source bits. */
7568 if (GET_CODE (op
) == ZERO_EXTEND
&& SCALAR_INT_MODE_P (innermode
))
7570 poly_uint64 bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
7571 if (known_ge (bitpos
, GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))))
7572 return CONST0_RTX (outermode
);
7575 scalar_int_mode int_outermode
, int_innermode
;
7576 if (is_a
<scalar_int_mode
> (outermode
, &int_outermode
)
7577 && is_a
<scalar_int_mode
> (innermode
, &int_innermode
)
7578 && known_eq (byte
, subreg_lowpart_offset (int_outermode
, int_innermode
)))
7580 /* Handle polynomial integers. The upper bits of a paradoxical
7581 subreg are undefined, so this is safe regardless of whether
7582 we're truncating or extending. */
7583 if (CONST_POLY_INT_P (op
))
7586 = poly_wide_int::from (const_poly_int_value (op
),
7587 GET_MODE_PRECISION (int_outermode
),
7589 return immed_wide_int_const (val
, int_outermode
);
7592 if (GET_MODE_PRECISION (int_outermode
)
7593 < GET_MODE_PRECISION (int_innermode
))
7595 rtx tem
= simplify_truncation (int_outermode
, op
, int_innermode
);
7601 /* If OP is a vector comparison and the subreg is not changing the
7602 number of elements or the size of the elements, change the result
7603 of the comparison to the new mode. */
7604 if (COMPARISON_P (op
)
7605 && VECTOR_MODE_P (outermode
)
7606 && VECTOR_MODE_P (innermode
)
7607 && known_eq (GET_MODE_NUNITS (outermode
), GET_MODE_NUNITS (innermode
))
7608 && known_eq (GET_MODE_UNIT_SIZE (outermode
),
7609 GET_MODE_UNIT_SIZE (innermode
)))
7610 return simplify_gen_relational (GET_CODE (op
), outermode
, innermode
,
7611 XEXP (op
, 0), XEXP (op
, 1));
7615 /* Make a SUBREG operation or equivalent if it folds. */
7618 simplify_context::simplify_gen_subreg (machine_mode outermode
, rtx op
,
7619 machine_mode innermode
,
7624 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
7628 if (GET_CODE (op
) == SUBREG
7629 || GET_CODE (op
) == CONCAT
7630 || GET_MODE (op
) == VOIDmode
)
7633 if (MODE_COMPOSITE_P (outermode
)
7634 && (CONST_SCALAR_INT_P (op
)
7635 || CONST_DOUBLE_AS_FLOAT_P (op
)
7636 || CONST_FIXED_P (op
)
7637 || GET_CODE (op
) == CONST_VECTOR
))
7640 if (validate_subreg (outermode
, innermode
, op
, byte
))
7641 return gen_rtx_SUBREG (outermode
, op
, byte
);
7646 /* Generates a subreg to get the least significant part of EXPR (in mode
7647 INNER_MODE) to OUTER_MODE. */
7650 simplify_context::lowpart_subreg (machine_mode outer_mode
, rtx expr
,
7651 machine_mode inner_mode
)
7653 return simplify_gen_subreg (outer_mode
, expr
, inner_mode
,
7654 subreg_lowpart_offset (outer_mode
, inner_mode
));
7657 /* Generate RTX to select element at INDEX out of vector OP. */
7660 simplify_context::simplify_gen_vec_select (rtx op
, unsigned int index
)
7662 gcc_assert (VECTOR_MODE_P (GET_MODE (op
)));
7664 scalar_mode imode
= GET_MODE_INNER (GET_MODE (op
));
7666 if (known_eq (index
* GET_MODE_SIZE (imode
),
7667 subreg_lowpart_offset (imode
, GET_MODE (op
))))
7669 rtx res
= lowpart_subreg (imode
, op
, GET_MODE (op
));
7674 rtx tmp
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, GEN_INT (index
)));
7675 return gen_rtx_VEC_SELECT (imode
, op
, tmp
);
7679 /* Simplify X, an rtx expression.
7681 Return the simplified expression or NULL if no simplifications
7684 This is the preferred entry point into the simplification routines;
7685 however, we still allow passes to call the more specific routines.
7687 Right now GCC has three (yes, three) major bodies of RTL simplification
7688 code that need to be unified.
7690 1. fold_rtx in cse.cc. This code uses various CSE specific
7691 information to aid in RTL simplification.
7693 2. simplify_rtx in combine.cc. Similar to fold_rtx, except that
7694 it uses combine specific information to aid in RTL
7697 3. The routines in this file.
7700 Long term we want to only have one body of simplification code; to
7701 get to that state I recommend the following steps:
7703 1. Pour over fold_rtx & simplify_rtx and move any simplifications
7704 which are not pass dependent state into these routines.
7706 2. As code is moved by #1, change fold_rtx & simplify_rtx to
7707 use this routine whenever possible.
7709 3. Allow for pass dependent state to be provided to these
7710 routines and add simplifications based on the pass dependent
7711 state. Remove code from cse.cc & combine.cc that becomes
7714 It will take time, but ultimately the compiler will be easier to
7715 maintain and improve. It's totally silly that when we add a
7716 simplification that it needs to be added to 4 places (3 for RTL
7717 simplification and 1 for tree simplification. */
7720 simplify_rtx (const_rtx x
)
7722 const enum rtx_code code
= GET_CODE (x
);
7723 const machine_mode mode
= GET_MODE (x
);
7725 switch (GET_RTX_CLASS (code
))
7728 return simplify_unary_operation (code
, mode
,
7729 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
7730 case RTX_COMM_ARITH
:
7731 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
7732 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
7737 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
7740 case RTX_BITFIELD_OPS
:
7741 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
7742 XEXP (x
, 0), XEXP (x
, 1),
7746 case RTX_COMM_COMPARE
:
7747 return simplify_relational_operation (code
, mode
,
7748 ((GET_MODE (XEXP (x
, 0))
7750 ? GET_MODE (XEXP (x
, 0))
7751 : GET_MODE (XEXP (x
, 1))),
7757 return simplify_subreg (mode
, SUBREG_REG (x
),
7758 GET_MODE (SUBREG_REG (x
)),
7765 /* Convert (lo_sum (high FOO) FOO) to FOO. */
7766 if (GET_CODE (XEXP (x
, 0)) == HIGH
7767 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))
7780 namespace selftest
{
7782 /* Make a unique pseudo REG of mode MODE for use by selftests. */
7785 make_test_reg (machine_mode mode
)
7787 static int test_reg_num
= LAST_VIRTUAL_REGISTER
+ 1;
7789 return gen_rtx_REG (mode
, test_reg_num
++);
7793 test_scalar_int_ops (machine_mode mode
)
7795 rtx op0
= make_test_reg (mode
);
7796 rtx op1
= make_test_reg (mode
);
7797 rtx six
= GEN_INT (6);
7799 rtx neg_op0
= simplify_gen_unary (NEG
, mode
, op0
, mode
);
7800 rtx not_op0
= simplify_gen_unary (NOT
, mode
, op0
, mode
);
7801 rtx bswap_op0
= simplify_gen_unary (BSWAP
, mode
, op0
, mode
);
7803 rtx and_op0_op1
= simplify_gen_binary (AND
, mode
, op0
, op1
);
7804 rtx ior_op0_op1
= simplify_gen_binary (IOR
, mode
, op0
, op1
);
7805 rtx xor_op0_op1
= simplify_gen_binary (XOR
, mode
, op0
, op1
);
7807 rtx and_op0_6
= simplify_gen_binary (AND
, mode
, op0
, six
);
7808 rtx and_op1_6
= simplify_gen_binary (AND
, mode
, op1
, six
);
7810 /* Test some binary identities. */
7811 ASSERT_RTX_EQ (op0
, simplify_gen_binary (PLUS
, mode
, op0
, const0_rtx
));
7812 ASSERT_RTX_EQ (op0
, simplify_gen_binary (PLUS
, mode
, const0_rtx
, op0
));
7813 ASSERT_RTX_EQ (op0
, simplify_gen_binary (MINUS
, mode
, op0
, const0_rtx
));
7814 ASSERT_RTX_EQ (op0
, simplify_gen_binary (MULT
, mode
, op0
, const1_rtx
));
7815 ASSERT_RTX_EQ (op0
, simplify_gen_binary (MULT
, mode
, const1_rtx
, op0
));
7816 ASSERT_RTX_EQ (op0
, simplify_gen_binary (DIV
, mode
, op0
, const1_rtx
));
7817 ASSERT_RTX_EQ (op0
, simplify_gen_binary (AND
, mode
, op0
, constm1_rtx
));
7818 ASSERT_RTX_EQ (op0
, simplify_gen_binary (AND
, mode
, constm1_rtx
, op0
));
7819 ASSERT_RTX_EQ (op0
, simplify_gen_binary (IOR
, mode
, op0
, const0_rtx
));
7820 ASSERT_RTX_EQ (op0
, simplify_gen_binary (IOR
, mode
, const0_rtx
, op0
));
7821 ASSERT_RTX_EQ (op0
, simplify_gen_binary (XOR
, mode
, op0
, const0_rtx
));
7822 ASSERT_RTX_EQ (op0
, simplify_gen_binary (XOR
, mode
, const0_rtx
, op0
));
7823 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ASHIFT
, mode
, op0
, const0_rtx
));
7824 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ROTATE
, mode
, op0
, const0_rtx
));
7825 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ASHIFTRT
, mode
, op0
, const0_rtx
));
7826 ASSERT_RTX_EQ (op0
, simplify_gen_binary (LSHIFTRT
, mode
, op0
, const0_rtx
));
7827 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ROTATERT
, mode
, op0
, const0_rtx
));
7829 /* Test some self-inverse operations. */
7830 ASSERT_RTX_EQ (op0
, simplify_gen_unary (NEG
, mode
, neg_op0
, mode
));
7831 ASSERT_RTX_EQ (op0
, simplify_gen_unary (NOT
, mode
, not_op0
, mode
));
7832 ASSERT_RTX_EQ (op0
, simplify_gen_unary (BSWAP
, mode
, bswap_op0
, mode
));
7834 /* Test some reflexive operations. */
7835 ASSERT_RTX_EQ (op0
, simplify_gen_binary (AND
, mode
, op0
, op0
));
7836 ASSERT_RTX_EQ (op0
, simplify_gen_binary (IOR
, mode
, op0
, op0
));
7837 ASSERT_RTX_EQ (op0
, simplify_gen_binary (SMIN
, mode
, op0
, op0
));
7838 ASSERT_RTX_EQ (op0
, simplify_gen_binary (SMAX
, mode
, op0
, op0
));
7839 ASSERT_RTX_EQ (op0
, simplify_gen_binary (UMIN
, mode
, op0
, op0
));
7840 ASSERT_RTX_EQ (op0
, simplify_gen_binary (UMAX
, mode
, op0
, op0
));
7842 ASSERT_RTX_EQ (const0_rtx
, simplify_gen_binary (MINUS
, mode
, op0
, op0
));
7843 ASSERT_RTX_EQ (const0_rtx
, simplify_gen_binary (XOR
, mode
, op0
, op0
));
7845 /* Test simplify_distributive_operation. */
7846 ASSERT_RTX_EQ (simplify_gen_binary (AND
, mode
, xor_op0_op1
, six
),
7847 simplify_gen_binary (XOR
, mode
, and_op0_6
, and_op1_6
));
7848 ASSERT_RTX_EQ (simplify_gen_binary (AND
, mode
, ior_op0_op1
, six
),
7849 simplify_gen_binary (IOR
, mode
, and_op0_6
, and_op1_6
));
7850 ASSERT_RTX_EQ (simplify_gen_binary (AND
, mode
, and_op0_op1
, six
),
7851 simplify_gen_binary (AND
, mode
, and_op0_6
, and_op1_6
));
7853 /* Test useless extensions are eliminated. */
7854 ASSERT_RTX_EQ (op0
, simplify_gen_unary (TRUNCATE
, mode
, op0
, mode
));
7855 ASSERT_RTX_EQ (op0
, simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, mode
));
7856 ASSERT_RTX_EQ (op0
, simplify_gen_unary (SIGN_EXTEND
, mode
, op0
, mode
));
7857 ASSERT_RTX_EQ (op0
, lowpart_subreg (mode
, op0
, mode
));
7860 /* Verify some simplifications of integer extension/truncation.
7861 Machine mode BMODE is the guaranteed wider than SMODE. */
7864 test_scalar_int_ext_ops (machine_mode bmode
, machine_mode smode
)
7866 rtx sreg
= make_test_reg (smode
);
7868 /* Check truncation of extension. */
7869 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7870 simplify_gen_unary (ZERO_EXTEND
, bmode
,
7874 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7875 simplify_gen_unary (SIGN_EXTEND
, bmode
,
7879 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7880 lowpart_subreg (bmode
, sreg
, smode
),
7885 /* Verify more simplifications of integer extension/truncation.
7886 BMODE is wider than MMODE which is wider than SMODE. */
7889 test_scalar_int_ext_ops2 (machine_mode bmode
, machine_mode mmode
,
7892 rtx breg
= make_test_reg (bmode
);
7893 rtx mreg
= make_test_reg (mmode
);
7894 rtx sreg
= make_test_reg (smode
);
7896 /* Check truncate of truncate. */
7897 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7898 simplify_gen_unary (TRUNCATE
, mmode
,
7901 simplify_gen_unary (TRUNCATE
, smode
, breg
, bmode
));
7903 /* Check extension of extension. */
7904 ASSERT_RTX_EQ (simplify_gen_unary (ZERO_EXTEND
, bmode
,
7905 simplify_gen_unary (ZERO_EXTEND
, mmode
,
7908 simplify_gen_unary (ZERO_EXTEND
, bmode
, sreg
, smode
));
7909 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND
, bmode
,
7910 simplify_gen_unary (SIGN_EXTEND
, mmode
,
7913 simplify_gen_unary (SIGN_EXTEND
, bmode
, sreg
, smode
));
7914 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND
, bmode
,
7915 simplify_gen_unary (ZERO_EXTEND
, mmode
,
7918 simplify_gen_unary (ZERO_EXTEND
, bmode
, sreg
, smode
));
7920 /* Check truncation of extension. */
7921 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7922 simplify_gen_unary (ZERO_EXTEND
, bmode
,
7925 simplify_gen_unary (TRUNCATE
, smode
, mreg
, mmode
));
7926 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7927 simplify_gen_unary (SIGN_EXTEND
, bmode
,
7930 simplify_gen_unary (TRUNCATE
, smode
, mreg
, mmode
));
7931 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7932 lowpart_subreg (bmode
, mreg
, mmode
),
7934 simplify_gen_unary (TRUNCATE
, smode
, mreg
, mmode
));
7938 /* Verify some simplifications involving scalar expressions. */
7943 for (unsigned int i
= 0; i
< NUM_MACHINE_MODES
; ++i
)
7945 machine_mode mode
= (machine_mode
) i
;
7946 if (SCALAR_INT_MODE_P (mode
) && mode
!= BImode
)
7947 test_scalar_int_ops (mode
);
7950 test_scalar_int_ext_ops (HImode
, QImode
);
7951 test_scalar_int_ext_ops (SImode
, QImode
);
7952 test_scalar_int_ext_ops (SImode
, HImode
);
7953 test_scalar_int_ext_ops (DImode
, QImode
);
7954 test_scalar_int_ext_ops (DImode
, HImode
);
7955 test_scalar_int_ext_ops (DImode
, SImode
);
7957 test_scalar_int_ext_ops2 (SImode
, HImode
, QImode
);
7958 test_scalar_int_ext_ops2 (DImode
, HImode
, QImode
);
7959 test_scalar_int_ext_ops2 (DImode
, SImode
, QImode
);
7960 test_scalar_int_ext_ops2 (DImode
, SImode
, HImode
);
7963 /* Test vector simplifications involving VEC_DUPLICATE in which the
7964 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7965 register that holds one element of MODE. */
7968 test_vector_ops_duplicate (machine_mode mode
, rtx scalar_reg
)
7970 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
7971 rtx duplicate
= gen_rtx_VEC_DUPLICATE (mode
, scalar_reg
);
7972 poly_uint64 nunits
= GET_MODE_NUNITS (mode
);
7973 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
7975 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
7976 rtx not_scalar_reg
= gen_rtx_NOT (inner_mode
, scalar_reg
);
7977 rtx duplicate_not
= gen_rtx_VEC_DUPLICATE (mode
, not_scalar_reg
);
7978 ASSERT_RTX_EQ (duplicate
,
7979 simplify_unary_operation (NOT
, mode
,
7980 duplicate_not
, mode
));
7982 rtx neg_scalar_reg
= gen_rtx_NEG (inner_mode
, scalar_reg
);
7983 rtx duplicate_neg
= gen_rtx_VEC_DUPLICATE (mode
, neg_scalar_reg
);
7984 ASSERT_RTX_EQ (duplicate
,
7985 simplify_unary_operation (NEG
, mode
,
7986 duplicate_neg
, mode
));
7988 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
7989 ASSERT_RTX_EQ (duplicate
,
7990 simplify_binary_operation (PLUS
, mode
, duplicate
,
7991 CONST0_RTX (mode
)));
7993 ASSERT_RTX_EQ (duplicate
,
7994 simplify_binary_operation (MINUS
, mode
, duplicate
,
7995 CONST0_RTX (mode
)));
7997 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode
),
7998 simplify_binary_operation (MINUS
, mode
, duplicate
,
8002 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
8003 rtx zero_par
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, const0_rtx
));
8004 ASSERT_RTX_PTR_EQ (scalar_reg
,
8005 simplify_binary_operation (VEC_SELECT
, inner_mode
,
8006 duplicate
, zero_par
));
8008 unsigned HOST_WIDE_INT const_nunits
;
8009 if (nunits
.is_constant (&const_nunits
))
8011 /* And again with the final element. */
8012 rtx last_index
= gen_int_mode (const_nunits
- 1, word_mode
);
8013 rtx last_par
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, last_index
));
8014 ASSERT_RTX_PTR_EQ (scalar_reg
,
8015 simplify_binary_operation (VEC_SELECT
, inner_mode
,
8016 duplicate
, last_par
));
8018 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
8019 /* Skip this test for vectors of booleans, because offset is in bytes,
8020 while vec_merge indices are in elements (usually bits). */
8021 if (GET_MODE_CLASS (mode
) != MODE_VECTOR_BOOL
)
8023 rtx vector_reg
= make_test_reg (mode
);
8024 for (unsigned HOST_WIDE_INT i
= 0; i
< const_nunits
; i
++)
8026 if (i
>= HOST_BITS_PER_WIDE_INT
)
8028 rtx mask
= GEN_INT ((HOST_WIDE_INT_1U
<< i
) | (i
+ 1));
8029 rtx vm
= gen_rtx_VEC_MERGE (mode
, duplicate
, vector_reg
, mask
);
8030 poly_uint64 offset
= i
* GET_MODE_SIZE (inner_mode
);
8032 ASSERT_RTX_EQ (scalar_reg
,
8033 simplify_gen_subreg (inner_mode
, vm
,
8039 /* Test a scalar subreg of a VEC_DUPLICATE. */
8040 poly_uint64 offset
= subreg_lowpart_offset (inner_mode
, mode
);
8041 ASSERT_RTX_EQ (scalar_reg
,
8042 simplify_gen_subreg (inner_mode
, duplicate
,
8045 machine_mode narrower_mode
;
8046 if (maybe_ne (nunits
, 2U)
8047 && multiple_p (nunits
, 2)
8048 && mode_for_vector (inner_mode
, 2).exists (&narrower_mode
)
8049 && VECTOR_MODE_P (narrower_mode
))
8051 /* Test VEC_DUPLICATE of a vector. */
8052 rtx_vector_builder
nbuilder (narrower_mode
, 2, 1);
8053 nbuilder
.quick_push (const0_rtx
);
8054 nbuilder
.quick_push (const1_rtx
);
8055 rtx_vector_builder
builder (mode
, 2, 1);
8056 builder
.quick_push (const0_rtx
);
8057 builder
.quick_push (const1_rtx
);
8058 ASSERT_RTX_EQ (builder
.build (),
8059 simplify_unary_operation (VEC_DUPLICATE
, mode
,
      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
        = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
        = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
                     simplify_binary_operation (VEC_SELECT, narrower_mode,
                                                duplicate, vec_par));
      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
                     simplify_gen_subreg (narrower_mode, duplicate,
                                          mode, offset));
    }
}

/* Test vector simplifications involving VEC_SERIES in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_series (machine_mode mode, rtx scalar_reg)
{
  /* Test unary cases with VEC_SERIES arguments.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
  rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
  rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
  rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
  rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
  rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
  rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
                                         neg_scalar_reg);
  ASSERT_RTX_EQ (series_0_r,
                 simplify_unary_operation (NEG, mode, series_0_nr, mode));
  ASSERT_RTX_EQ (series_r_m1,
                 simplify_unary_operation (NEG, mode, series_nr_1, mode));
  ASSERT_RTX_EQ (series_r_r,
                 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
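
  /* Written out elementwise, element I of (vec_series BASE STEP) is
     BASE + I * STEP, so the three checks above correspond to:
        -(0 + I * -r)  == 0 + I * r
        -(-r + I * 1)  == r + I * -1
        -(-r + I * -r) == r + I * r.  */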

  /* Test that a VEC_SERIES with a zero step is simplified away.  */
  ASSERT_RTX_EQ (duplicate,
                 simplify_binary_operation (VEC_SERIES, mode,
                                            scalar_reg, const0_rtx));

  /* Test PLUS and MINUS with VEC_SERIES.  */
  rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
  rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
  rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
  ASSERT_RTX_EQ (series_r_r,
                 simplify_binary_operation (PLUS, mode, series_0_r,
                                            duplicate));
  ASSERT_RTX_EQ (series_r_1,
                 simplify_binary_operation (PLUS, mode, duplicate,
                                            series_0_1));
  ASSERT_RTX_EQ (series_r_m1,
                 simplify_binary_operation (PLUS, mode, duplicate,
                                            series_0_m1));
  ASSERT_RTX_EQ (series_0_r,
                 simplify_binary_operation (MINUS, mode, series_r_r,
                                            duplicate));
  ASSERT_RTX_EQ (series_r_m1,
                 simplify_binary_operation (MINUS, mode, duplicate,
                                            series_0_1));
  ASSERT_RTX_EQ (series_r_1,
                 simplify_binary_operation (MINUS, mode, duplicate,
                                            series_0_m1));
  ASSERT_RTX_EQ (series_0_m1,
                 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
                                            constm1_rtx));
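
  /* Elementwise justification for the first mixed case above:
     (0 + I * r) + r == r + I * r, i.e. adding a duplicate of r to
     (vec_series 0 r) yields (vec_series r r); the remaining PLUS and
     MINUS cases follow by negating the base or the step.  */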

  /* Test NEG on constant vector series.  */
  ASSERT_RTX_EQ (series_0_m1,
                 simplify_unary_operation (NEG, mode, series_0_1, mode));
  ASSERT_RTX_EQ (series_0_1,
                 simplify_unary_operation (NEG, mode, series_0_m1, mode));

  /* Test PLUS and MINUS on constant vector series.  */
  rtx scalar2 = gen_int_mode (2, inner_mode);
  rtx scalar3 = gen_int_mode (3, inner_mode);
  rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
  rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
  rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
  ASSERT_RTX_EQ (series_1_1,
                 simplify_binary_operation (PLUS, mode, series_0_1,
                                            CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_0_m1,
                 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
                                            series_0_m1));
  ASSERT_RTX_EQ (series_1_3,
                 simplify_binary_operation (PLUS, mode, series_1_1,
                                            series_0_2));
  ASSERT_RTX_EQ (series_0_1,
                 simplify_binary_operation (MINUS, mode, series_1_1,
                                            CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_1_1,
                 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
                                            series_0_m1));
  ASSERT_RTX_EQ (series_1_1,
                 simplify_binary_operation (MINUS, mode, series_1_3,
                                            series_0_2));

  /* Test MULT between constant vectors.  */
  rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
  rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
  rtx scalar9 = gen_int_mode (9, inner_mode);
  rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
  ASSERT_RTX_EQ (series_0_2,
                 simplify_binary_operation (MULT, mode, series_0_1, vec2));
  ASSERT_RTX_EQ (series_3_9,
                 simplify_binary_operation (MULT, mode, vec3, series_1_3));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
                                             series_0_1));
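
  /* The product of two stepped series is quadratic in the element index
     (e.g. (0 + I) * (0 + I) == I * I), which VEC_SERIES cannot
     represent; with a constant element count the result can still be
     computed elementwise into a CONST_VECTOR, so failure is asserted
     only for variable-length modes.  */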

  /* Test ASHIFT between constant vectors.  */
  ASSERT_RTX_EQ (series_0_2,
                 simplify_binary_operation (ASHIFT, mode, series_0_1,
                                            CONST1_RTX (mode)));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
                                             series_0_1));
}

static rtx
simplify_merge_mask (rtx x, rtx mask, int op)
{
  return simplify_context ().simplify_merge_mask (x, mask, op);
}

/* Verify simplify_merge_mask works correctly.  */

static void
test_vec_merge (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx op2 = make_test_reg (mode);
  rtx op3 = make_test_reg (mode);
  rtx op4 = make_test_reg (mode);
  rtx op5 = make_test_reg (mode);
  rtx mask1 = make_test_reg (SImode);
  rtx mask2 = make_test_reg (SImode);
  rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
  rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
  rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);

  /* Simple vec_merge.  */
  ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
  ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
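
  /* These four checks pin down the contract being tested: given a
     (vec_merge A B MASK') somewhere in X, simplify_merge_mask (X, MASK,
     OP) resolves it to operand OP (0 for A, 1 for B) when MASK' is the
     same mask as MASK, and returns NULL_RTX when the masks differ and
     nothing simplifies.  */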

  /* Nested vec_merge.
     It's tempting to make this simplify right down to opN, but we don't
     because all the simplify_* functions assume that the operands have
     already been simplified.  */
  rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
  ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
  ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));

  /* Intermediate unary op.  */
  rtx unop = gen_rtx_NOT (mode, vm1);
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
                 simplify_merge_mask (unop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
                 simplify_merge_mask (unop, mask1, 1));

  /* Intermediate binary op.  */
  rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
                 simplify_merge_mask (binop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
                 simplify_merge_mask (binop, mask1, 1));

  /* Intermediate ternary op.  */
  rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
                 simplify_merge_mask (tenop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
                 simplify_merge_mask (tenop, mask1, 1));

  /* Side effects.  */
  rtx badop0 = gen_rtx_PRE_INC (mode, op0);
  rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
  ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));

  /* Called indirectly.  */
  ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
                 simplify_rtx (nvm));
}

/* Test subregs of integer vector constant X, trying elements in
   the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
   where NELTS is the number of elements in X.  Subregs involving
   elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail.  */

static void
test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
                           unsigned int first_valid = 0)
{
  machine_mode inner_mode = GET_MODE (x);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);

  for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
    {
      machine_mode outer_mode = (machine_mode) modei;
      if (!VECTOR_MODE_P (outer_mode))
        continue;

      unsigned int outer_nunits;
      if (GET_MODE_INNER (outer_mode) == int_mode
          && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
          && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
        {
          /* Test subregs in which the outer mode is a smaller,
             constant-sized vector of the same element type.  */
          unsigned int limit
            = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
          for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
            {
              rtx expected = NULL_RTX;
              if (elt >= first_valid)
                {
                  rtx_vector_builder builder (outer_mode, outer_nunits, 1);
                  for (unsigned int i = 0; i < outer_nunits; ++i)
                    builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
                  expected = builder.build ();
                }
              poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
              ASSERT_RTX_EQ (expected,
                             simplify_subreg (outer_mode, x,
                                              inner_mode, byte));
            }
        }
      else if (known_eq (GET_MODE_SIZE (outer_mode),
                         GET_MODE_SIZE (inner_mode))
               && known_eq (elt_bias, 0U)
               && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
                   || known_eq (GET_MODE_BITSIZE (outer_mode),
                                GET_MODE_NUNITS (outer_mode)))
               && (!FLOAT_MODE_P (outer_mode)
                   || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
                       == GET_MODE_UNIT_PRECISION (outer_mode)))
               && (GET_MODE_SIZE (inner_mode).is_constant ()
                   || !CONST_VECTOR_STEPPED_P (x)))
        {
          /* Try converting to OUTER_MODE and back.  */
          rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
          ASSERT_TRUE (outer_x != NULL_RTX);
          ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
                                             outer_mode, 0));
        }
    }

  if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
    {
      /* Test each byte in the element range.  */
      unsigned int limit
        = constant_lower_bound (GET_MODE_SIZE (inner_mode));
      for (unsigned int i = 0; i < limit; ++i)
        {
          unsigned int elt = i / GET_MODE_SIZE (int_mode);
          rtx expected = NULL_RTX;
          if (elt >= first_valid)
            {
              unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
              if (BYTES_BIG_ENDIAN)
                byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
              rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
              wide_int shifted_elt
                = wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
              expected = immed_wide_int_const (shifted_elt, QImode);
            }
          poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
          ASSERT_RTX_EQ (expected,
                         simplify_subreg (QImode, x, inner_mode, byte));
        }
    }
}
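
/* A worked example for the byte loop above, assuming a little-endian
   target and HImode elements (illustrative only): if element 0 of X is
   0x1234, the QImode subreg at byte 0 must be 0x34 (BYTE_SHIFT 0) and
   the one at byte 1 must be 0x12, i.e. the element shifted right by 8
   bits.  */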

/* Test constant subregs of integer vector mode INNER_MODE, using 1
   element per pattern.  */

static void
test_vector_subregs_repeating (machine_mode inner_mode)
{
  poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
  unsigned int min_nunits = constant_lower_bound (nunits);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  unsigned int count = gcd (min_nunits, 8);

  rtx_vector_builder builder (inner_mode, count, 1);
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (8 - i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
  if (!nunits.is_constant ())
    test_vector_subregs_modes (x, nunits - min_nunits);
}

/* Test constant subregs of integer vector mode INNER_MODE, using 2
   elements per pattern.  */

static void
test_vector_subregs_fore_back (machine_mode inner_mode)
{
  poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
  unsigned int min_nunits = constant_lower_bound (nunits);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  unsigned int count = gcd (min_nunits, 4);

  rtx_vector_builder builder (inner_mode, count, 2);
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (i, int_mode));
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (-(int) i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
  if (!nunits.is_constant ())
    test_vector_subregs_modes (x, nunits - min_nunits, count);
}

/* Test constant subregs of integer vector mode INNER_MODE, using 3
   elements per pattern.  */

static void
test_vector_subregs_stepped (machine_mode inner_mode)
{
  /* Build { 0, 1, 2, 3, ... }.  */
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  rtx_vector_builder builder (inner_mode, 1, 3);
  for (unsigned int i = 0; i < 3; ++i)
    builder.quick_push (gen_int_mode (i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
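}

/* Note on the encoding used in test_vector_subregs_stepped above: an
   rtx_vector_builder with 1 pattern and 3 elements per pattern encodes
   a stepped sequence, later elements continuing the step implied by the
   last two pushed values; pushing 0, 1, 2 therefore yields
   { 0, 1, 2, 3, ... } for any number of elements.  */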

/* Test constant subregs of integer vector mode INNER_MODE.  */

static void
test_vector_subregs (machine_mode inner_mode)
{
  test_vector_subregs_repeating (inner_mode);
  test_vector_subregs_fore_back (inner_mode);
  test_vector_subregs_stepped (inner_mode);
}

/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
        {
          rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
          test_vector_ops_duplicate (mode, scalar_reg);
          if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
              && maybe_gt (GET_MODE_NUNITS (mode), 2))
            {
              test_vector_ops_series (mode, scalar_reg);
              test_vector_subregs (mode);
            }
          test_vec_merge (mode);
        }
    }
}

template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};

/* Test various CONST_POLY_INT properties.  */

template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller arithmetic
     testing is done directly on poly_ints.  */
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}
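
/* A worked instance of the arithmetic above: X1 + X2 is computed
   coefficient-wise, (1 + -80, 1 + 127) = (-79, 128), and 128 wraps to
   -128 in QImode, giving X3.  Likewise NOT X8 == -X8 - 1, where only
   the constant coefficient absorbs the -1: (-30 - 1, -24), i.e. X10.  */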

/* Run all of the selftests within this file.  */

void
simplify_rtx_cc_tests ()
{
  test_scalar_ops ();
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}

} // namespace selftest

#endif /* CHECKING_P */