1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2021 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
33 #include "diagnostic-core.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
50 static bool plus_minus_operand_p (const_rtx
);
52 /* Negate I, which satisfies poly_int_rtx_p. MODE is the mode of I. */
55 neg_poly_int_rtx (machine_mode mode
, const_rtx i
)
57 return immed_wide_int_const (-wi::to_poly_wide (i
, mode
), mode
);
60 /* Test whether expression, X, is an immediate constant that represents
61 the most significant bit of machine mode MODE. */
64 mode_signbit_p (machine_mode mode
, const_rtx x
)
66 unsigned HOST_WIDE_INT val
;
68 scalar_int_mode int_mode
;
70 if (!is_int_mode (mode
, &int_mode
))
73 width
= GET_MODE_PRECISION (int_mode
);
77 if (width
<= HOST_BITS_PER_WIDE_INT
80 #if TARGET_SUPPORTS_WIDE_INT
81 else if (CONST_WIDE_INT_P (x
))
84 unsigned int elts
= CONST_WIDE_INT_NUNITS (x
);
85 if (elts
!= (width
+ HOST_BITS_PER_WIDE_INT
- 1) / HOST_BITS_PER_WIDE_INT
)
87 for (i
= 0; i
< elts
- 1; i
++)
88 if (CONST_WIDE_INT_ELT (x
, i
) != 0)
90 val
= CONST_WIDE_INT_ELT (x
, elts
- 1);
91 width
%= HOST_BITS_PER_WIDE_INT
;
93 width
= HOST_BITS_PER_WIDE_INT
;
96 else if (width
<= HOST_BITS_PER_DOUBLE_INT
97 && CONST_DOUBLE_AS_INT_P (x
)
98 && CONST_DOUBLE_LOW (x
) == 0)
100 val
= CONST_DOUBLE_HIGH (x
);
101 width
-= HOST_BITS_PER_WIDE_INT
;
105 /* X is not an integer constant. */
108 if (width
< HOST_BITS_PER_WIDE_INT
)
109 val
&= (HOST_WIDE_INT_1U
<< width
) - 1;
110 return val
== (HOST_WIDE_INT_1U
<< (width
- 1));
113 /* Test whether VAL is equal to the most significant bit of mode MODE
114 (after masking with the mode mask of MODE). Returns false if the
115 precision of MODE is too large to handle. */
118 val_signbit_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
121 scalar_int_mode int_mode
;
123 if (!is_int_mode (mode
, &int_mode
))
126 width
= GET_MODE_PRECISION (int_mode
);
127 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
130 val
&= GET_MODE_MASK (int_mode
);
131 return val
== (HOST_WIDE_INT_1U
<< (width
- 1));
134 /* Test whether the most significant bit of mode MODE is set in VAL.
135 Returns false if the precision of MODE is too large to handle. */
137 val_signbit_known_set_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
141 scalar_int_mode int_mode
;
142 if (!is_int_mode (mode
, &int_mode
))
145 width
= GET_MODE_PRECISION (int_mode
);
146 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
149 val
&= HOST_WIDE_INT_1U
<< (width
- 1);
153 /* Test whether the most significant bit of mode MODE is clear in VAL.
154 Returns false if the precision of MODE is too large to handle. */
156 val_signbit_known_clear_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
160 scalar_int_mode int_mode
;
161 if (!is_int_mode (mode
, &int_mode
))
164 width
= GET_MODE_PRECISION (int_mode
);
165 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
168 val
&= HOST_WIDE_INT_1U
<< (width
- 1);
172 /* Make a binary operation by properly ordering the operands and
173 seeing if the expression folds. */
176 simplify_context::simplify_gen_binary (rtx_code code
, machine_mode mode
,
181 /* If this simplifies, do it. */
182 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
186 /* Put complex operands first and constants second if commutative. */
187 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
188 && swap_commutative_operands_p (op0
, op1
))
189 std::swap (op0
, op1
);
191 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
194 /* If X is a MEM referencing the constant pool, return the real value.
195 Otherwise return X. */
197 avoid_constant_pool_reference (rtx x
)
201 poly_int64 offset
= 0;
203 switch (GET_CODE (x
))
209 /* Handle float extensions of constant pool references. */
211 c
= avoid_constant_pool_reference (tmp
);
212 if (c
!= tmp
&& CONST_DOUBLE_AS_FLOAT_P (c
))
213 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c
),
221 if (GET_MODE (x
) == BLKmode
)
226 /* Call target hook to avoid the effects of -fpic etc.... */
227 addr
= targetm
.delegitimize_address (addr
);
229 /* Split the address into a base and integer offset. */
230 addr
= strip_offset (addr
, &offset
);
232 if (GET_CODE (addr
) == LO_SUM
)
233 addr
= XEXP (addr
, 1);
235 /* If this is a constant pool reference, we can turn it into its
236 constant and hope that simplifications happen. */
237 if (GET_CODE (addr
) == SYMBOL_REF
238 && CONSTANT_POOL_ADDRESS_P (addr
))
240 c
= get_pool_constant (addr
);
241 cmode
= get_pool_mode (addr
);
243 /* If we're accessing the constant in a different mode than it was
244 originally stored, attempt to fix that up via subreg simplifications.
245 If that fails we have no choice but to return the original memory. */
246 if (known_eq (offset
, 0) && cmode
== GET_MODE (x
))
248 else if (known_in_range_p (offset
, 0, GET_MODE_SIZE (cmode
)))
250 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
251 if (tem
&& CONSTANT_P (tem
))
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
264 delegitimize_mem_from_attrs (rtx x
)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
270 && MEM_OFFSET_KNOWN_P (x
))
272 tree decl
= MEM_EXPR (x
);
273 machine_mode mode
= GET_MODE (x
);
274 poly_int64 offset
= 0;
276 switch (TREE_CODE (decl
))
286 case ARRAY_RANGE_REF
:
291 case VIEW_CONVERT_EXPR
:
293 poly_int64 bitsize
, bitpos
, bytepos
, toffset_val
= 0;
295 int unsignedp
, reversep
, volatilep
= 0;
298 = get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
, &mode
,
299 &unsignedp
, &reversep
, &volatilep
);
300 if (maybe_ne (bitsize
, GET_MODE_BITSIZE (mode
))
301 || !multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
302 || (toffset
&& !poly_int_tree_p (toffset
, &toffset_val
)))
305 offset
+= bytepos
+ toffset_val
;
311 && mode
== GET_MODE (x
)
313 && (TREE_STATIC (decl
)
314 || DECL_THREAD_LOCAL_P (decl
))
315 && DECL_RTL_SET_P (decl
)
316 && MEM_P (DECL_RTL (decl
)))
320 offset
+= MEM_OFFSET (x
);
322 newx
= DECL_RTL (decl
);
326 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
327 poly_int64 n_offset
, o_offset
;
329 /* Avoid creating a new MEM needlessly if we already had
330 the same address. We do if there's no OFFSET and the
331 old address X is identical to NEWX, or if X is of the
332 form (plus NEWX OFFSET), or the NEWX is of the form
333 (plus Y (const_int Z)) and X is that with the offset
334 added: (plus Y (const_int Z+OFFSET)). */
335 n
= strip_offset (n
, &n_offset
);
336 o
= strip_offset (o
, &o_offset
);
337 if (!(known_eq (o_offset
, n_offset
+ offset
)
338 && rtx_equal_p (o
, n
)))
339 x
= adjust_address_nv (newx
, mode
, offset
);
341 else if (GET_MODE (x
) == GET_MODE (newx
)
342 && known_eq (offset
, 0))
350 /* Make a unary operation by first seeing if it folds and otherwise making
351 the specified operation. */
354 simplify_context::simplify_gen_unary (rtx_code code
, machine_mode mode
, rtx op
,
355 machine_mode op_mode
)
359 /* If this simplifies, use it. */
360 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
363 return gen_rtx_fmt_e (code
, mode
, op
);
366 /* Likewise for ternary operations. */
369 simplify_context::simplify_gen_ternary (rtx_code code
, machine_mode mode
,
370 machine_mode op0_mode
,
371 rtx op0
, rtx op1
, rtx op2
)
375 /* If this simplifies, use it. */
376 if ((tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
377 op0
, op1
, op2
)) != 0)
380 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
383 /* Likewise, for relational operations.
384 CMP_MODE specifies mode comparison is done in. */
387 simplify_context::simplify_gen_relational (rtx_code code
, machine_mode mode
,
388 machine_mode cmp_mode
,
393 if ((tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
397 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
400 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
401 and simplify the result. If FN is non-NULL, call this callback on each
402 X, if it returns non-NULL, replace X with its return value and simplify the
406 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
407 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
409 enum rtx_code code
= GET_CODE (x
);
410 machine_mode mode
= GET_MODE (x
);
411 machine_mode op_mode
;
413 rtx op0
, op1
, op2
, newx
, op
;
417 if (__builtin_expect (fn
!= NULL
, 0))
419 newx
= fn (x
, old_rtx
, data
);
423 else if (rtx_equal_p (x
, old_rtx
))
424 return copy_rtx ((rtx
) data
);
426 switch (GET_RTX_CLASS (code
))
430 op_mode
= GET_MODE (op0
);
431 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
432 if (op0
== XEXP (x
, 0))
434 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
438 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
439 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
440 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
442 return simplify_gen_binary (code
, mode
, op0
, op1
);
445 case RTX_COMM_COMPARE
:
448 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
449 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
450 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
451 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
453 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
456 case RTX_BITFIELD_OPS
:
458 op_mode
= GET_MODE (op0
);
459 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
460 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
461 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
462 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
464 if (op_mode
== VOIDmode
)
465 op_mode
= GET_MODE (op0
);
466 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
471 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
472 if (op0
== SUBREG_REG (x
))
474 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
475 GET_MODE (SUBREG_REG (x
)),
477 return op0
? op0
: x
;
484 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
485 if (op0
== XEXP (x
, 0))
487 return replace_equiv_address_nv (x
, op0
);
489 else if (code
== LO_SUM
)
491 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
492 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
494 /* (lo_sum (high x) y) -> y where x and y have the same base. */
495 if (GET_CODE (op0
) == HIGH
)
497 rtx base0
, base1
, offset0
, offset1
;
498 split_const (XEXP (op0
, 0), &base0
, &offset0
);
499 split_const (op1
, &base1
, &offset1
);
500 if (rtx_equal_p (base0
, base1
))
504 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
506 return gen_rtx_LO_SUM (mode
, op0
, op1
);
515 fmt
= GET_RTX_FORMAT (code
);
516 for (i
= 0; fmt
[i
]; i
++)
521 newvec
= XVEC (newx
, i
);
522 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
524 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
526 if (op
!= RTVEC_ELT (vec
, j
))
530 newvec
= shallow_copy_rtvec (vec
);
532 newx
= shallow_copy_rtx (x
);
533 XVEC (newx
, i
) = newvec
;
535 RTVEC_ELT (newvec
, j
) = op
;
543 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
544 if (op
!= XEXP (x
, i
))
547 newx
= shallow_copy_rtx (x
);
556 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
557 resulting RTX. Return a new RTX which is as simplified as possible. */
560 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
562 return simplify_replace_fn_rtx (x
, old_rtx
, 0, new_rtx
);
565 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
566 Only handle cases where the truncated value is inherently an rvalue.
568 RTL provides two ways of truncating a value:
570 1. a lowpart subreg. This form is only a truncation when both
571 the outer and inner modes (here MODE and OP_MODE respectively)
572 are scalar integers, and only then when the subreg is used as
575 It is only valid to form such truncating subregs if the
576 truncation requires no action by the target. The onus for
577 proving this is on the creator of the subreg -- e.g. the
578 caller to simplify_subreg or simplify_gen_subreg -- and typically
579 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
581 2. a TRUNCATE. This form handles both scalar and compound integers.
583 The first form is preferred where valid. However, the TRUNCATE
584 handling in simplify_unary_operation turns the second form into the
585 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
586 so it is generally safe to form rvalue truncations using:
588 simplify_gen_unary (TRUNCATE, ...)
590 and leave simplify_unary_operation to work out which representation
593 Because of the proof requirements on (1), simplify_truncation must
594 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
595 regardless of whether the outer truncation came from a SUBREG or a
596 TRUNCATE. For example, if the caller has proven that an SImode
601 is a no-op and can be represented as a subreg, it does not follow
602 that SImode truncations of X and Y are also no-ops. On a target
603 like 64-bit MIPS that requires SImode values to be stored in
604 sign-extended form, an SImode truncation of:
606 (and:DI (reg:DI X) (const_int 63))
608 is trivially a no-op because only the lower 6 bits can be set.
609 However, X is still an arbitrary 64-bit number and so we cannot
610 assume that truncating it too is a no-op. */
613 simplify_context::simplify_truncation (machine_mode mode
, rtx op
,
614 machine_mode op_mode
)
616 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
617 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
618 scalar_int_mode int_mode
, int_op_mode
, subreg_mode
;
620 gcc_assert (precision
<= op_precision
);
622 /* Optimize truncations of zero and sign extended values. */
623 if (GET_CODE (op
) == ZERO_EXTEND
624 || GET_CODE (op
) == SIGN_EXTEND
)
626 /* There are three possibilities. If MODE is the same as the
627 origmode, we can omit both the extension and the subreg.
628 If MODE is not larger than the origmode, we can apply the
629 truncation without the extension. Finally, if the outermode
630 is larger than the origmode, we can just extend to the appropriate
632 machine_mode origmode
= GET_MODE (XEXP (op
, 0));
633 if (mode
== origmode
)
635 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
636 return simplify_gen_unary (TRUNCATE
, mode
,
637 XEXP (op
, 0), origmode
);
639 return simplify_gen_unary (GET_CODE (op
), mode
,
640 XEXP (op
, 0), origmode
);
643 /* If the machine can perform operations in the truncated mode, distribute
644 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
645 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
647 && (!WORD_REGISTER_OPERATIONS
|| precision
>= BITS_PER_WORD
)
648 && (GET_CODE (op
) == PLUS
649 || GET_CODE (op
) == MINUS
650 || GET_CODE (op
) == MULT
))
652 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
655 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
657 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
661 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
662 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
663 the outer subreg is effectively a truncation to the original mode. */
664 if ((GET_CODE (op
) == LSHIFTRT
665 || GET_CODE (op
) == ASHIFTRT
)
666 /* Ensure that OP_MODE is at least twice as wide as MODE
667 to avoid the possibility that an outer LSHIFTRT shifts by more
668 than the sign extension's sign_bit_copies and introduces zeros
669 into the high bits of the result. */
670 && 2 * precision
<= op_precision
671 && CONST_INT_P (XEXP (op
, 1))
672 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
673 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
674 && UINTVAL (XEXP (op
, 1)) < precision
)
675 return simplify_gen_binary (ASHIFTRT
, mode
,
676 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
678 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
679 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
680 the outer subreg is effectively a truncation to the original mode. */
681 if ((GET_CODE (op
) == LSHIFTRT
682 || GET_CODE (op
) == ASHIFTRT
)
683 && CONST_INT_P (XEXP (op
, 1))
684 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
685 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
686 && UINTVAL (XEXP (op
, 1)) < precision
)
687 return simplify_gen_binary (LSHIFTRT
, mode
,
688 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
690 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
691 to (ashift:QI (x:QI) C), where C is a suitable small constant and
692 the outer subreg is effectively a truncation to the original mode. */
693 if (GET_CODE (op
) == ASHIFT
694 && CONST_INT_P (XEXP (op
, 1))
695 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
696 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
697 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
698 && UINTVAL (XEXP (op
, 1)) < precision
)
699 return simplify_gen_binary (ASHIFT
, mode
,
700 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
702 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
703 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
705 if (GET_CODE (op
) == AND
706 && (GET_CODE (XEXP (op
, 0)) == LSHIFTRT
707 || GET_CODE (XEXP (op
, 0)) == ASHIFTRT
)
708 && CONST_INT_P (XEXP (XEXP (op
, 0), 1))
709 && CONST_INT_P (XEXP (op
, 1)))
711 rtx op0
= (XEXP (XEXP (op
, 0), 0));
712 rtx shift_op
= XEXP (XEXP (op
, 0), 1);
713 rtx mask_op
= XEXP (op
, 1);
714 unsigned HOST_WIDE_INT shift
= UINTVAL (shift_op
);
715 unsigned HOST_WIDE_INT mask
= UINTVAL (mask_op
);
717 if (shift
< precision
718 /* If doing this transform works for an X with all bits set,
719 it works for any X. */
720 && ((GET_MODE_MASK (mode
) >> shift
) & mask
)
721 == ((GET_MODE_MASK (op_mode
) >> shift
) & mask
)
722 && (op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, op_mode
))
723 && (op0
= simplify_gen_binary (LSHIFTRT
, mode
, op0
, shift_op
)))
725 mask_op
= GEN_INT (trunc_int_for_mode (mask
, mode
));
726 return simplify_gen_binary (AND
, mode
, op0
, mask_op
);
730 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
731 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
733 if ((GET_CODE (op
) == ZERO_EXTRACT
|| GET_CODE (op
) == SIGN_EXTRACT
)
734 && REG_P (XEXP (op
, 0))
735 && GET_MODE (XEXP (op
, 0)) == GET_MODE (op
)
736 && CONST_INT_P (XEXP (op
, 1))
737 && CONST_INT_P (XEXP (op
, 2)))
739 rtx op0
= XEXP (op
, 0);
740 unsigned HOST_WIDE_INT len
= UINTVAL (XEXP (op
, 1));
741 unsigned HOST_WIDE_INT pos
= UINTVAL (XEXP (op
, 2));
742 if (BITS_BIG_ENDIAN
&& pos
>= op_precision
- precision
)
744 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
747 pos
-= op_precision
- precision
;
748 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
749 XEXP (op
, 1), GEN_INT (pos
));
752 else if (!BITS_BIG_ENDIAN
&& precision
>= len
+ pos
)
754 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
756 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
757 XEXP (op
, 1), XEXP (op
, 2));
761 /* Recognize a word extraction from a multi-word subreg. */
762 if ((GET_CODE (op
) == LSHIFTRT
763 || GET_CODE (op
) == ASHIFTRT
)
764 && SCALAR_INT_MODE_P (mode
)
765 && SCALAR_INT_MODE_P (op_mode
)
766 && precision
>= BITS_PER_WORD
767 && 2 * precision
<= op_precision
768 && CONST_INT_P (XEXP (op
, 1))
769 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
770 && UINTVAL (XEXP (op
, 1)) < op_precision
)
772 poly_int64 byte
= subreg_lowpart_offset (mode
, op_mode
);
773 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
774 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
776 ? byte
- shifted_bytes
777 : byte
+ shifted_bytes
));
780 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
781 and try replacing the TRUNCATE and shift with it. Don't do this
782 if the MEM has a mode-dependent address. */
783 if ((GET_CODE (op
) == LSHIFTRT
784 || GET_CODE (op
) == ASHIFTRT
)
785 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
786 && is_a
<scalar_int_mode
> (op_mode
, &int_op_mode
)
787 && MEM_P (XEXP (op
, 0))
788 && CONST_INT_P (XEXP (op
, 1))
789 && INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (int_mode
) == 0
790 && INTVAL (XEXP (op
, 1)) > 0
791 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (int_op_mode
)
792 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
793 MEM_ADDR_SPACE (XEXP (op
, 0)))
794 && ! MEM_VOLATILE_P (XEXP (op
, 0))
795 && (GET_MODE_SIZE (int_mode
) >= UNITS_PER_WORD
796 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
798 poly_int64 byte
= subreg_lowpart_offset (int_mode
, int_op_mode
);
799 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
800 return adjust_address_nv (XEXP (op
, 0), int_mode
,
802 ? byte
- shifted_bytes
803 : byte
+ shifted_bytes
));
806 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
807 (OP:SI foo:SI) if OP is NEG or ABS. */
808 if ((GET_CODE (op
) == ABS
809 || GET_CODE (op
) == NEG
)
810 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
811 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
812 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
813 return simplify_gen_unary (GET_CODE (op
), mode
,
814 XEXP (XEXP (op
, 0), 0), mode
);
816 /* Simplifications of (truncate:A (subreg:B X 0)). */
817 if (GET_CODE (op
) == SUBREG
818 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
819 && SCALAR_INT_MODE_P (op_mode
)
820 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &subreg_mode
)
821 && subreg_lowpart_p (op
))
823 /* (truncate:A (subreg:B (truncate:C X) 0)) is (truncate:A X). */
824 if (GET_CODE (SUBREG_REG (op
)) == TRUNCATE
)
826 rtx inner
= XEXP (SUBREG_REG (op
), 0);
827 if (GET_MODE_PRECISION (int_mode
)
828 <= GET_MODE_PRECISION (subreg_mode
))
829 return simplify_gen_unary (TRUNCATE
, int_mode
, inner
,
832 /* If subreg above is paradoxical and C is narrower
833 than A, return (subreg:A (truncate:C X) 0). */
834 return simplify_gen_subreg (int_mode
, SUBREG_REG (op
),
838 /* Simplifications of (truncate:A (subreg:B X:C 0)) with
839 paradoxical subregs (B is wider than C). */
840 if (is_a
<scalar_int_mode
> (op_mode
, &int_op_mode
))
842 unsigned int int_op_prec
= GET_MODE_PRECISION (int_op_mode
);
843 unsigned int subreg_prec
= GET_MODE_PRECISION (subreg_mode
);
844 if (int_op_prec
> subreg_prec
)
846 if (int_mode
== subreg_mode
)
847 return SUBREG_REG (op
);
848 if (GET_MODE_PRECISION (int_mode
) < subreg_prec
)
849 return simplify_gen_unary (TRUNCATE
, int_mode
,
850 SUBREG_REG (op
), subreg_mode
);
852 /* Simplification of (truncate:A (subreg:B X:C 0)) where
853 A is narrower than B and B is narrower than C. */
854 else if (int_op_prec
< subreg_prec
855 && GET_MODE_PRECISION (int_mode
) < int_op_prec
)
856 return simplify_gen_unary (TRUNCATE
, int_mode
,
857 SUBREG_REG (op
), subreg_mode
);
861 /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 if (GET_CODE (op
) == TRUNCATE
)
863 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
864 GET_MODE (XEXP (op
, 0)));
866 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
868 if (GET_CODE (op
) == IOR
869 && SCALAR_INT_MODE_P (mode
)
870 && SCALAR_INT_MODE_P (op_mode
)
871 && CONST_INT_P (XEXP (op
, 1))
872 && trunc_int_for_mode (INTVAL (XEXP (op
, 1)), mode
) == -1)
878 /* Try to simplify a unary operation CODE whose output mode is to be
879 MODE with input operand OP whose mode was originally OP_MODE.
880 Return zero if no simplification can be made. */
882 simplify_context::simplify_unary_operation (rtx_code code
, machine_mode mode
,
883 rtx op
, machine_mode op_mode
)
887 trueop
= avoid_constant_pool_reference (op
);
889 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
893 return simplify_unary_operation_1 (code
, mode
, op
);
896 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
900 exact_int_to_float_conversion_p (const_rtx op
)
902 int out_bits
= significand_size (GET_MODE_INNER (GET_MODE (op
)));
903 machine_mode op0_mode
= GET_MODE (XEXP (op
, 0));
904 /* Constants shouldn't reach here. */
905 gcc_assert (op0_mode
!= VOIDmode
);
906 int in_prec
= GET_MODE_UNIT_PRECISION (op0_mode
);
907 int in_bits
= in_prec
;
908 if (HWI_COMPUTABLE_MODE_P (op0_mode
))
910 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (XEXP (op
, 0), op0_mode
);
911 if (GET_CODE (op
) == FLOAT
)
912 in_bits
-= num_sign_bit_copies (XEXP (op
, 0), op0_mode
);
913 else if (GET_CODE (op
) == UNSIGNED_FLOAT
)
914 in_bits
= wi::min_precision (wi::uhwi (nonzero
, in_prec
), UNSIGNED
);
917 in_bits
-= wi::ctz (wi::uhwi (nonzero
, in_prec
));
919 return in_bits
<= out_bits
;
922 /* Perform some simplifications we can do even if the operands
925 simplify_context::simplify_unary_operation_1 (rtx_code code
, machine_mode mode
,
928 enum rtx_code reversed
;
929 rtx temp
, elt
, base
, step
;
930 scalar_int_mode inner
, int_mode
, op_mode
, op0_mode
;
935 /* (not (not X)) == X. */
936 if (GET_CODE (op
) == NOT
)
939 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
940 comparison is all ones. */
941 if (COMPARISON_P (op
)
942 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
943 && ((reversed
= reversed_comparison_code (op
, NULL
)) != UNKNOWN
))
944 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
945 XEXP (op
, 0), XEXP (op
, 1));
947 /* (not (plus X -1)) can become (neg X). */
948 if (GET_CODE (op
) == PLUS
949 && XEXP (op
, 1) == constm1_rtx
)
950 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
952 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
953 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
954 and MODE_VECTOR_INT. */
955 if (GET_CODE (op
) == NEG
&& CONSTM1_RTX (mode
))
956 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
959 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
960 if (GET_CODE (op
) == XOR
961 && CONST_INT_P (XEXP (op
, 1))
962 && (temp
= simplify_unary_operation (NOT
, mode
,
963 XEXP (op
, 1), mode
)) != 0)
964 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
966 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
967 if (GET_CODE (op
) == PLUS
968 && CONST_INT_P (XEXP (op
, 1))
969 && mode_signbit_p (mode
, XEXP (op
, 1))
970 && (temp
= simplify_unary_operation (NOT
, mode
,
971 XEXP (op
, 1), mode
)) != 0)
972 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
975 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
976 operands other than 1, but that is not valid. We could do a
977 similar simplification for (not (lshiftrt C X)) where C is
978 just the sign bit, but this doesn't seem common enough to
980 if (GET_CODE (op
) == ASHIFT
981 && XEXP (op
, 0) == const1_rtx
)
983 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
984 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
987 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
988 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
989 so we can perform the above simplification. */
990 if (STORE_FLAG_VALUE
== -1
991 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
992 && GET_CODE (op
) == ASHIFTRT
993 && CONST_INT_P (XEXP (op
, 1))
994 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
995 return simplify_gen_relational (GE
, int_mode
, VOIDmode
,
996 XEXP (op
, 0), const0_rtx
);
999 if (partial_subreg_p (op
)
1000 && subreg_lowpart_p (op
)
1001 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
1002 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
1004 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
1007 x
= gen_rtx_ROTATE (inner_mode
,
1008 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
1010 XEXP (SUBREG_REG (op
), 1));
1011 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
1016 /* Apply De Morgan's laws to reduce number of patterns for machines
1017 with negating logical insns (and-not, nand, etc.). If result has
1018 only one NOT, put it first, since that is how the patterns are
1020 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
1022 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
1023 machine_mode op_mode
;
1025 op_mode
= GET_MODE (in1
);
1026 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
1028 op_mode
= GET_MODE (in2
);
1029 if (op_mode
== VOIDmode
)
1031 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
1033 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
1034 std::swap (in1
, in2
);
1036 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
1040 /* (not (bswap x)) -> (bswap (not x)). */
1041 if (GET_CODE (op
) == BSWAP
)
1043 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1044 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
1049 /* (neg (neg X)) == X. */
1050 if (GET_CODE (op
) == NEG
)
1051 return XEXP (op
, 0);
1053 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1054 If comparison is not reversible use
1056 if (GET_CODE (op
) == IF_THEN_ELSE
)
1058 rtx cond
= XEXP (op
, 0);
1059 rtx true_rtx
= XEXP (op
, 1);
1060 rtx false_rtx
= XEXP (op
, 2);
1062 if ((GET_CODE (true_rtx
) == NEG
1063 && rtx_equal_p (XEXP (true_rtx
, 0), false_rtx
))
1064 || (GET_CODE (false_rtx
) == NEG
1065 && rtx_equal_p (XEXP (false_rtx
, 0), true_rtx
)))
1067 if (reversed_comparison_code (cond
, NULL
) != UNKNOWN
)
1068 temp
= reversed_comparison (cond
, mode
);
1072 std::swap (true_rtx
, false_rtx
);
1074 return simplify_gen_ternary (IF_THEN_ELSE
, mode
,
1075 mode
, temp
, true_rtx
, false_rtx
);
1079 /* (neg (plus X 1)) can become (not X). */
1080 if (GET_CODE (op
) == PLUS
1081 && XEXP (op
, 1) == const1_rtx
)
1082 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1084 /* Similarly, (neg (not X)) is (plus X 1). */
1085 if (GET_CODE (op
) == NOT
)
1086 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
1089 /* (neg (minus X Y)) can become (minus Y X). This transformation
1090 isn't safe for modes with signed zeros, since if X and Y are
1091 both +0, (minus Y X) is the same as (minus X Y). If the
1092 rounding mode is towards +infinity (or -infinity) then the two
1093 expressions will be rounded differently. */
1094 if (GET_CODE (op
) == MINUS
1095 && !HONOR_SIGNED_ZEROS (mode
)
1096 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1097 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
1099 if (GET_CODE (op
) == PLUS
1100 && !HONOR_SIGNED_ZEROS (mode
)
1101 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1103 /* (neg (plus A C)) is simplified to (minus -C A). */
1104 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
1105 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
1107 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
1109 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
1112 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1113 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1114 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1117 /* (neg (mult A B)) becomes (mult A (neg B)).
1118 This works even for floating-point values. */
1119 if (GET_CODE (op
) == MULT
1120 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1122 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1123 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1126 /* NEG commutes with ASHIFT since it is multiplication. Only do
1127 this if we can then eliminate the NEG (e.g., if the operand
1129 if (GET_CODE (op
) == ASHIFT
)
1131 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1133 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1136 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1137 C is equal to the width of MODE minus 1. */
1138 if (GET_CODE (op
) == ASHIFTRT
1139 && CONST_INT_P (XEXP (op
, 1))
1140 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1141 return simplify_gen_binary (LSHIFTRT
, mode
,
1142 XEXP (op
, 0), XEXP (op
, 1));
1144 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1145 C is equal to the width of MODE minus 1. */
1146 if (GET_CODE (op
) == LSHIFTRT
1147 && CONST_INT_P (XEXP (op
, 1))
1148 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1149 return simplify_gen_binary (ASHIFTRT
, mode
,
1150 XEXP (op
, 0), XEXP (op
, 1));
1152 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1153 if (GET_CODE (op
) == XOR
1154 && XEXP (op
, 1) == const1_rtx
1155 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1156 return plus_constant (mode
, XEXP (op
, 0), -1);
1158 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1159 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1160 if (GET_CODE (op
) == LT
1161 && XEXP (op
, 1) == const0_rtx
1162 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op
, 0)), &inner
))
1164 int_mode
= as_a
<scalar_int_mode
> (mode
);
1165 int isize
= GET_MODE_PRECISION (inner
);
1166 if (STORE_FLAG_VALUE
== 1)
1168 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1169 gen_int_shift_amount (inner
,
1171 if (int_mode
== inner
)
1173 if (GET_MODE_PRECISION (int_mode
) > isize
)
1174 return simplify_gen_unary (SIGN_EXTEND
, int_mode
, temp
, inner
);
1175 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1177 else if (STORE_FLAG_VALUE
== -1)
1179 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1180 gen_int_shift_amount (inner
,
1182 if (int_mode
== inner
)
1184 if (GET_MODE_PRECISION (int_mode
) > isize
)
1185 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, temp
, inner
);
1186 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1190 if (vec_series_p (op
, &base
, &step
))
1192 /* Only create a new series if we can simplify both parts. In other
1193 cases this isn't really a simplification, and it's not necessarily
1194 a win to replace a vector operation with a scalar operation. */
1195 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
1196 base
= simplify_unary_operation (NEG
, inner_mode
, base
, inner_mode
);
1199 step
= simplify_unary_operation (NEG
, inner_mode
,
1202 return gen_vec_series (mode
, base
, step
);
1208 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1209 with the umulXi3_highpart patterns. */
1210 if (GET_CODE (op
) == LSHIFTRT
1211 && GET_CODE (XEXP (op
, 0)) == MULT
)
1214 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1216 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1218 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1222 /* We can't handle truncation to a partial integer mode here
1223 because we don't know the real bitsize of the partial
1228 if (GET_MODE (op
) != VOIDmode
)
1230 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1235 /* If we know that the value is already truncated, we can
1236 replace the TRUNCATE with a SUBREG. */
1237 if (known_eq (GET_MODE_NUNITS (mode
), 1)
1238 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1239 || truncated_to_mode (mode
, op
)))
1241 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1246 /* A truncate of a comparison can be replaced with a subreg if
1247 STORE_FLAG_VALUE permits. This is like the previous test,
1248 but it works even if the comparison is done in a mode larger
1249 than HOST_BITS_PER_WIDE_INT. */
1250 if (HWI_COMPUTABLE_MODE_P (mode
)
1251 && COMPARISON_P (op
)
1252 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0
1253 && TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1255 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1260 /* A truncate of a memory is just loading the low part of the memory
1261 if we are not changing the meaning of the address. */
1262 if (GET_CODE (op
) == MEM
1263 && !VECTOR_MODE_P (mode
)
1264 && !MEM_VOLATILE_P (op
)
1265 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1267 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1272 /* Check for useless truncation. */
1273 if (GET_MODE (op
) == mode
)
1277 case FLOAT_TRUNCATE
:
1278 /* Check for useless truncation. */
1279 if (GET_MODE (op
) == mode
)
1282 if (DECIMAL_FLOAT_MODE_P (mode
))
1285 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1286 if (GET_CODE (op
) == FLOAT_EXTEND
1287 && GET_MODE (XEXP (op
, 0)) == mode
)
1288 return XEXP (op
, 0);
1290 /* (float_truncate:SF (float_truncate:DF foo:XF))
1291 = (float_truncate:SF foo:XF).
1292 This may eliminate double rounding, so it is unsafe.
1294 (float_truncate:SF (float_extend:XF foo:DF))
1295 = (float_truncate:SF foo:DF).
1297 (float_truncate:DF (float_extend:XF foo:SF))
1298 = (float_extend:DF foo:SF). */
1299 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1300 && flag_unsafe_math_optimizations
)
1301 || GET_CODE (op
) == FLOAT_EXTEND
)
1302 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)))
1303 > GET_MODE_UNIT_SIZE (mode
)
1304 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1306 XEXP (op
, 0), mode
);
1308 /* (float_truncate (float x)) is (float x) */
1309 if ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1310 && (flag_unsafe_math_optimizations
1311 || exact_int_to_float_conversion_p (op
)))
1312 return simplify_gen_unary (GET_CODE (op
), mode
,
1314 GET_MODE (XEXP (op
, 0)));
1316 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1317 (OP:SF foo:SF) if OP is NEG or ABS. */
1318 if ((GET_CODE (op
) == ABS
1319 || GET_CODE (op
) == NEG
)
1320 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1321 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1322 return simplify_gen_unary (GET_CODE (op
), mode
,
1323 XEXP (XEXP (op
, 0), 0), mode
);
1325 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1326 is (float_truncate:SF x). */
1327 if (GET_CODE (op
) == SUBREG
1328 && subreg_lowpart_p (op
)
1329 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1330 return SUBREG_REG (op
);
1334 /* Check for useless extension. */
1335 if (GET_MODE (op
) == mode
)
1338 if (DECIMAL_FLOAT_MODE_P (mode
))
1341 /* (float_extend (float_extend x)) is (float_extend x)
1343 (float_extend (float x)) is (float x) assuming that double
1344 rounding can't happen.
1346 if (GET_CODE (op
) == FLOAT_EXTEND
1347 || ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1348 && exact_int_to_float_conversion_p (op
)))
1349 return simplify_gen_unary (GET_CODE (op
), mode
,
1351 GET_MODE (XEXP (op
, 0)));
1356 /* (abs (neg <foo>)) -> (abs <foo>) */
1357 if (GET_CODE (op
) == NEG
)
1358 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1359 GET_MODE (XEXP (op
, 0)));
1361 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1363 if (GET_MODE (op
) == VOIDmode
)
1366 /* If operand is something known to be positive, ignore the ABS. */
1367 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1368 || val_signbit_known_clear_p (GET_MODE (op
),
1369 nonzero_bits (op
, GET_MODE (op
))))
1372 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1373 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
1374 && (num_sign_bit_copies (op
, int_mode
)
1375 == GET_MODE_PRECISION (int_mode
)))
1376 return gen_rtx_NEG (int_mode
, op
);
1381 /* (ffs (*_extend <X>)) = (ffs <X>) */
1382 if (GET_CODE (op
) == SIGN_EXTEND
1383 || GET_CODE (op
) == ZERO_EXTEND
)
1384 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1385 GET_MODE (XEXP (op
, 0)));
1389 switch (GET_CODE (op
))
1393 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1394 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1395 GET_MODE (XEXP (op
, 0)));
1399 /* Rotations don't affect popcount. */
1400 if (!side_effects_p (XEXP (op
, 1)))
1401 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1402 GET_MODE (XEXP (op
, 0)));
1411 switch (GET_CODE (op
))
1417 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1418 GET_MODE (XEXP (op
, 0)));
1422 /* Rotations don't affect parity. */
1423 if (!side_effects_p (XEXP (op
, 1)))
1424 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1425 GET_MODE (XEXP (op
, 0)));
1429 /* (parity (parity x)) -> parity (x). */
1438 /* (bswap (bswap x)) -> x. */
1439 if (GET_CODE (op
) == BSWAP
)
1440 return XEXP (op
, 0);
1444 /* (float (sign_extend <X>)) = (float <X>). */
1445 if (GET_CODE (op
) == SIGN_EXTEND
)
1446 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1447 GET_MODE (XEXP (op
, 0)));
1451 /* Check for useless extension. */
1452 if (GET_MODE (op
) == mode
)
1455 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1456 becomes just the MINUS if its mode is MODE. This allows
1457 folding switch statements on machines using casesi (such as
1459 if (GET_CODE (op
) == TRUNCATE
1460 && GET_MODE (XEXP (op
, 0)) == mode
1461 && GET_CODE (XEXP (op
, 0)) == MINUS
1462 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1463 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1464 return XEXP (op
, 0);
1466 /* Extending a widening multiplication should be canonicalized to
1467 a wider widening multiplication. */
1468 if (GET_CODE (op
) == MULT
)
1470 rtx lhs
= XEXP (op
, 0);
1471 rtx rhs
= XEXP (op
, 1);
1472 enum rtx_code lcode
= GET_CODE (lhs
);
1473 enum rtx_code rcode
= GET_CODE (rhs
);
1475 /* Widening multiplies usually extend both operands, but sometimes
1476 they use a shift to extract a portion of a register. */
1477 if ((lcode
== SIGN_EXTEND
1478 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1479 && (rcode
== SIGN_EXTEND
1480 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1482 machine_mode lmode
= GET_MODE (lhs
);
1483 machine_mode rmode
= GET_MODE (rhs
);
1486 if (lcode
== ASHIFTRT
)
1487 /* Number of bits not shifted off the end. */
1488 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1489 - INTVAL (XEXP (lhs
, 1)));
1490 else /* lcode == SIGN_EXTEND */
1491 /* Size of inner mode. */
1492 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1494 if (rcode
== ASHIFTRT
)
1495 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1496 - INTVAL (XEXP (rhs
, 1)));
1497 else /* rcode == SIGN_EXTEND */
1498 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1500 /* We can only widen multiplies if the result is mathematiclly
1501 equivalent. I.e. if overflow was impossible. */
1502 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1503 return simplify_gen_binary
1505 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1506 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1510 /* Check for a sign extension of a subreg of a promoted
1511 variable, where the promotion is sign-extended, and the
1512 target mode is the same as the variable's promotion. */
1513 if (GET_CODE (op
) == SUBREG
1514 && SUBREG_PROMOTED_VAR_P (op
)
1515 && SUBREG_PROMOTED_SIGNED_P (op
))
1517 rtx subreg
= SUBREG_REG (op
);
1518 machine_mode subreg_mode
= GET_MODE (subreg
);
1519 if (!paradoxical_subreg_p (mode
, subreg_mode
))
1521 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, subreg
);
1524 /* Preserve SUBREG_PROMOTED_VAR_P. */
1525 if (partial_subreg_p (temp
))
1527 SUBREG_PROMOTED_VAR_P (temp
) = 1;
1528 SUBREG_PROMOTED_SET (temp
, 1);
1534 /* Sign-extending a sign-extended subreg. */
1535 return simplify_gen_unary (SIGN_EXTEND
, mode
,
1536 subreg
, subreg_mode
);
1539 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1540 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1541 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1543 gcc_assert (GET_MODE_UNIT_PRECISION (mode
)
1544 > GET_MODE_UNIT_PRECISION (GET_MODE (op
)));
1545 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1546 GET_MODE (XEXP (op
, 0)));
1549 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1550 is (sign_extend:M (subreg:O <X>)) if there is mode with
1551 GET_MODE_BITSIZE (N) - I bits.
1552 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1553 is similarly (zero_extend:M (subreg:O <X>)). */
1554 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1555 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1556 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1557 && CONST_INT_P (XEXP (op
, 1))
1558 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1559 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1560 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1562 scalar_int_mode tmode
;
1563 gcc_assert (GET_MODE_PRECISION (int_mode
)
1564 > GET_MODE_PRECISION (op_mode
));
1565 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1566 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1569 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1571 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1572 ? SIGN_EXTEND
: ZERO_EXTEND
,
1573 int_mode
, inner
, tmode
);
1577 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1578 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1579 if (GET_CODE (op
) == LSHIFTRT
1580 && CONST_INT_P (XEXP (op
, 1))
1581 && XEXP (op
, 1) != const0_rtx
)
1582 return simplify_gen_unary (ZERO_EXTEND
, mode
, op
, GET_MODE (op
));
1584 /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
1585 I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
1586 (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
1587 (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
1588 O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
1590 if (GET_CODE (op
) == TRUNCATE
1591 && GET_CODE (XEXP (op
, 0)) == LSHIFTRT
1592 && CONST_INT_P (XEXP (XEXP (op
, 0), 1)))
1594 scalar_int_mode m_mode
, n_mode
, o_mode
;
1595 rtx old_shift
= XEXP (op
, 0);
1596 if (is_a
<scalar_int_mode
> (mode
, &m_mode
)
1597 && is_a
<scalar_int_mode
> (GET_MODE (op
), &n_mode
)
1598 && is_a
<scalar_int_mode
> (GET_MODE (old_shift
), &o_mode
)
1599 && GET_MODE_PRECISION (o_mode
) - GET_MODE_PRECISION (n_mode
)
1600 == INTVAL (XEXP (old_shift
, 1)))
1602 rtx new_shift
= simplify_gen_binary (ASHIFTRT
,
1603 GET_MODE (old_shift
),
1604 XEXP (old_shift
, 0),
1605 XEXP (old_shift
, 1));
1606 if (GET_MODE_PRECISION (m_mode
) > GET_MODE_PRECISION (o_mode
))
1607 return simplify_gen_unary (SIGN_EXTEND
, mode
, new_shift
,
1608 GET_MODE (new_shift
));
1609 if (mode
!= GET_MODE (new_shift
))
1610 return simplify_gen_unary (TRUNCATE
, mode
, new_shift
,
1611 GET_MODE (new_shift
));
1616 #if defined(POINTERS_EXTEND_UNSIGNED)
1617 /* As we do not know which address space the pointer is referring to,
1618 we can do this only if the target does not support different pointer
1619 or address modes depending on the address space. */
1620 if (target_default_pointer_address_modes_p ()
1621 && ! POINTERS_EXTEND_UNSIGNED
1622 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1624 || (GET_CODE (op
) == SUBREG
1625 && REG_P (SUBREG_REG (op
))
1626 && REG_POINTER (SUBREG_REG (op
))
1627 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1628 && !targetm
.have_ptr_extend ())
1631 = convert_memory_address_addr_space_1 (Pmode
, op
,
1632 ADDR_SPACE_GENERIC
, false,
1641 /* Check for useless extension. */
1642 if (GET_MODE (op
) == mode
)
1645 /* Check for a zero extension of a subreg of a promoted
1646 variable, where the promotion is zero-extended, and the
1647 target mode is the same as the variable's promotion. */
1648 if (GET_CODE (op
) == SUBREG
1649 && SUBREG_PROMOTED_VAR_P (op
)
1650 && SUBREG_PROMOTED_UNSIGNED_P (op
))
1652 rtx subreg
= SUBREG_REG (op
);
1653 machine_mode subreg_mode
= GET_MODE (subreg
);
1654 if (!paradoxical_subreg_p (mode
, subreg_mode
))
1656 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, subreg
);
1659 /* Preserve SUBREG_PROMOTED_VAR_P. */
1660 if (partial_subreg_p (temp
))
1662 SUBREG_PROMOTED_VAR_P (temp
) = 1;
1663 SUBREG_PROMOTED_SET (temp
, 0);
1669 /* Zero-extending a zero-extended subreg. */
1670 return simplify_gen_unary (ZERO_EXTEND
, mode
,
1671 subreg
, subreg_mode
);
1674 /* Extending a widening multiplication should be canonicalized to
1675 a wider widening multiplication. */
1676 if (GET_CODE (op
) == MULT
)
1678 rtx lhs
= XEXP (op
, 0);
1679 rtx rhs
= XEXP (op
, 1);
1680 enum rtx_code lcode
= GET_CODE (lhs
);
1681 enum rtx_code rcode
= GET_CODE (rhs
);
1683 /* Widening multiplies usually extend both operands, but sometimes
1684 they use a shift to extract a portion of a register. */
1685 if ((lcode
== ZERO_EXTEND
1686 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1687 && (rcode
== ZERO_EXTEND
1688 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1690 machine_mode lmode
= GET_MODE (lhs
);
1691 machine_mode rmode
= GET_MODE (rhs
);
1694 if (lcode
== LSHIFTRT
)
1695 /* Number of bits not shifted off the end. */
1696 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1697 - INTVAL (XEXP (lhs
, 1)));
1698 else /* lcode == ZERO_EXTEND */
1699 /* Size of inner mode. */
1700 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1702 if (rcode
== LSHIFTRT
)
1703 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1704 - INTVAL (XEXP (rhs
, 1)));
1705 else /* rcode == ZERO_EXTEND */
1706 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1708 /* We can only widen multiplies if the result is mathematiclly
1709 equivalent. I.e. if overflow was impossible. */
1710 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1711 return simplify_gen_binary
1713 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1714 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1718 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1719 if (GET_CODE (op
) == ZERO_EXTEND
)
1720 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1721 GET_MODE (XEXP (op
, 0)));
1723 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1724 is (zero_extend:M (subreg:O <X>)) if there is mode with
1725 GET_MODE_PRECISION (N) - I bits. */
1726 if (GET_CODE (op
) == LSHIFTRT
1727 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1728 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1729 && CONST_INT_P (XEXP (op
, 1))
1730 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1731 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1732 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1734 scalar_int_mode tmode
;
1735 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1736 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1739 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1741 return simplify_gen_unary (ZERO_EXTEND
, int_mode
,
1746 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1747 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1749 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1750 (and:SI (reg:SI) (const_int 63)). */
1751 if (partial_subreg_p (op
)
1752 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1753 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &op0_mode
)
1754 && GET_MODE_PRECISION (op0_mode
) <= HOST_BITS_PER_WIDE_INT
1755 && GET_MODE_PRECISION (int_mode
) >= GET_MODE_PRECISION (op0_mode
)
1756 && subreg_lowpart_p (op
)
1757 && (nonzero_bits (SUBREG_REG (op
), op0_mode
)
1758 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1760 if (GET_MODE_PRECISION (int_mode
) == GET_MODE_PRECISION (op0_mode
))
1761 return SUBREG_REG (op
);
1762 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, SUBREG_REG (op
),
1766 #if defined(POINTERS_EXTEND_UNSIGNED)
1767 /* As we do not know which address space the pointer is referring to,
1768 we can do this only if the target does not support different pointer
1769 or address modes depending on the address space. */
1770 if (target_default_pointer_address_modes_p ()
1771 && POINTERS_EXTEND_UNSIGNED
> 0
1772 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1774 || (GET_CODE (op
) == SUBREG
1775 && REG_P (SUBREG_REG (op
))
1776 && REG_POINTER (SUBREG_REG (op
))
1777 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1778 && !targetm
.have_ptr_extend ())
1781 = convert_memory_address_addr_space_1 (Pmode
, op
,
1782 ADDR_SPACE_GENERIC
, false,
1794 if (VECTOR_MODE_P (mode
)
1795 && vec_duplicate_p (op
, &elt
)
1796 && code
!= VEC_DUPLICATE
)
1798 if (code
== SIGN_EXTEND
|| code
== ZERO_EXTEND
)
1799 /* Enforce a canonical order of VEC_DUPLICATE wrt other unary
1800 operations by promoting VEC_DUPLICATE to the root of the expression
1801 (as far as possible). */
1802 temp
= simplify_gen_unary (code
, GET_MODE_INNER (mode
),
1803 elt
, GET_MODE_INNER (GET_MODE (op
)));
1805 /* Try applying the operator to ELT and see if that simplifies.
1806 We can duplicate the result if so.
1808 The reason we traditionally haven't used simplify_gen_unary
1809 for these codes is that it didn't necessarily seem to be a
1810 win to convert things like:
1812 (neg:V (vec_duplicate:V (reg:S R)))
1816 (vec_duplicate:V (neg:S (reg:S R)))
1818 The first might be done entirely in vector registers while the
1819 second might need a move between register files.
1821 However, there also cases where promoting the vec_duplicate is
1822 more efficient, and there is definite value in having a canonical
1823 form when matching instruction patterns. We should consider
1824 extending the simplify_gen_unary code above to more cases. */
1825 temp
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1826 elt
, GET_MODE_INNER (GET_MODE (op
)));
1828 return gen_vec_duplicate (mode
, temp
);
1834 /* Try to compute the value of a unary operation CODE whose output mode is to
1835 be MODE with input operand OP whose mode was originally OP_MODE.
1836 Return zero if the value cannot be computed. */
1838 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1839 rtx op
, machine_mode op_mode
)
1841 scalar_int_mode result_mode
;
1843 if (code
== VEC_DUPLICATE
)
1845 gcc_assert (VECTOR_MODE_P (mode
));
1846 if (GET_MODE (op
) != VOIDmode
)
1848 if (!VECTOR_MODE_P (GET_MODE (op
)))
1849 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1851 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1854 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
))
1855 return gen_const_vec_duplicate (mode
, op
);
1856 if (GET_CODE (op
) == CONST_VECTOR
1857 && (CONST_VECTOR_DUPLICATE_P (op
)
1858 || CONST_VECTOR_NUNITS (op
).is_constant ()))
1860 unsigned int npatterns
= (CONST_VECTOR_DUPLICATE_P (op
)
1861 ? CONST_VECTOR_NPATTERNS (op
)
1862 : CONST_VECTOR_NUNITS (op
).to_constant ());
1863 gcc_assert (multiple_p (GET_MODE_NUNITS (mode
), npatterns
));
1864 rtx_vector_builder
builder (mode
, npatterns
, 1);
1865 for (unsigned i
= 0; i
< npatterns
; i
++)
1866 builder
.quick_push (CONST_VECTOR_ELT (op
, i
));
1867 return builder
.build ();
1871 if (VECTOR_MODE_P (mode
)
1872 && GET_CODE (op
) == CONST_VECTOR
1873 && known_eq (GET_MODE_NUNITS (mode
), CONST_VECTOR_NUNITS (op
)))
1875 gcc_assert (GET_MODE (op
) == op_mode
);
1877 rtx_vector_builder builder
;
1878 if (!builder
.new_unary_operation (mode
, op
, false))
1881 unsigned int count
= builder
.encoded_nelts ();
1882 for (unsigned int i
= 0; i
< count
; i
++)
1884 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1885 CONST_VECTOR_ELT (op
, i
),
1886 GET_MODE_INNER (op_mode
));
1887 if (!x
|| !valid_for_const_vector_p (mode
, x
))
1889 builder
.quick_push (x
);
1891 return builder
.build ();
1894 /* The order of these tests is critical so that, for example, we don't
1895 check the wrong mode (input vs. output) for a conversion operation,
1896 such as FIX. At some point, this should be simplified. */
1898 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1902 if (op_mode
== VOIDmode
)
1904 /* CONST_INT have VOIDmode as the mode. We assume that all
1905 the bits of the constant are significant, though, this is
1906 a dangerous assumption as many times CONST_INTs are
1907 created and used with garbage in the bits outside of the
1908 precision of the implied mode of the const_int. */
1909 op_mode
= MAX_MODE_INT
;
1912 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), SIGNED
);
1914 /* Avoid the folding if flag_signaling_nans is on and
1915 operand is a signaling NaN. */
1916 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1919 d
= real_value_truncate (mode
, d
);
1920 return const_double_from_real_value (d
, mode
);
1922 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1926 if (op_mode
== VOIDmode
)
1928 /* CONST_INT have VOIDmode as the mode. We assume that all
1929 the bits of the constant are significant, though, this is
1930 a dangerous assumption as many times CONST_INTs are
1931 created and used with garbage in the bits outside of the
1932 precision of the implied mode of the const_int. */
1933 op_mode
= MAX_MODE_INT
;
1936 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), UNSIGNED
);
1938 /* Avoid the folding if flag_signaling_nans is on and
1939 operand is a signaling NaN. */
1940 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1943 d
= real_value_truncate (mode
, d
);
1944 return const_double_from_real_value (d
, mode
);
1947 if (CONST_SCALAR_INT_P (op
) && is_a
<scalar_int_mode
> (mode
, &result_mode
))
1949 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1950 if (width
> MAX_BITSIZE_MODE_ANY_INT
)
1954 scalar_int_mode imode
= (op_mode
== VOIDmode
1956 : as_a
<scalar_int_mode
> (op_mode
));
1957 rtx_mode_t op0
= rtx_mode_t (op
, imode
);
1960 #if TARGET_SUPPORTS_WIDE_INT == 0
1961 /* This assert keeps the simplification from producing a result
1962 that cannot be represented in a CONST_DOUBLE but a lot of
1963 upstream callers expect that this function never fails to
1964 simplify something and so you if you added this to the test
1965 above the code would die later anyway. If this assert
1966 happens, you just need to make the port support wide int. */
1967 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1973 result
= wi::bit_not (op0
);
1977 result
= wi::neg (op0
);
1981 result
= wi::abs (op0
);
1985 result
= wi::shwi (wi::ffs (op0
), result_mode
);
1989 if (wi::ne_p (op0
, 0))
1990 int_value
= wi::clz (op0
);
1991 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1993 result
= wi::shwi (int_value
, result_mode
);
1997 result
= wi::shwi (wi::clrsb (op0
), result_mode
);
2001 if (wi::ne_p (op0
, 0))
2002 int_value
= wi::ctz (op0
);
2003 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
2005 result
= wi::shwi (int_value
, result_mode
);
2009 result
= wi::shwi (wi::popcount (op0
), result_mode
);
2013 result
= wi::shwi (wi::parity (op0
), result_mode
);
2017 result
= wide_int (op0
).bswap ();
2022 result
= wide_int::from (op0
, width
, UNSIGNED
);
2026 result
= wide_int::from (op0
, width
, SIGNED
);
2034 return immed_wide_int_const (result
, result_mode
);
2037 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
2038 && SCALAR_FLOAT_MODE_P (mode
)
2039 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
2041 REAL_VALUE_TYPE d
= *CONST_DOUBLE_REAL_VALUE (op
);
2047 d
= real_value_abs (&d
);
2050 d
= real_value_negate (&d
);
2052 case FLOAT_TRUNCATE
:
2053 /* Don't perform the operation if flag_signaling_nans is on
2054 and the operand is a signaling NaN. */
2055 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
2057 d
= real_value_truncate (mode
, d
);
2060 /* Don't perform the operation if flag_signaling_nans is on
2061 and the operand is a signaling NaN. */
2062 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
2064 /* All this does is change the mode, unless changing
2066 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
2067 real_convert (&d
, mode
, &d
);
2070 /* Don't perform the operation if flag_signaling_nans is on
2071 and the operand is a signaling NaN. */
2072 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
2074 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
2081 real_to_target (tmp
, &d
, GET_MODE (op
));
2082 for (i
= 0; i
< 4; i
++)
2084 real_from_target (&d
, tmp
, mode
);
2090 return const_double_from_real_value (d
, mode
);
2092 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
2093 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
2094 && is_int_mode (mode
, &result_mode
))
2096 unsigned int width
= GET_MODE_PRECISION (result_mode
);
2097 if (width
> MAX_BITSIZE_MODE_ANY_INT
)
2100 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
2101 operators are intentionally left unspecified (to ease implementation
2102 by target backends), for consistency, this routine implements the
2103 same semantics for constant folding as used by the middle-end. */
2105 /* This was formerly used only for non-IEEE float.
2106 eggert@twinsun.com says it is safe for IEEE also. */
2108 const REAL_VALUE_TYPE
*x
= CONST_DOUBLE_REAL_VALUE (op
);
2109 wide_int wmax
, wmin
;
2110 /* This is part of the abi to real_to_integer, but we check
2111 things before making this call. */
2117 if (REAL_VALUE_ISNAN (*x
))
2120 /* Test against the signed upper bound. */
2121 wmax
= wi::max_value (width
, SIGNED
);
2122 real_from_integer (&t
, VOIDmode
, wmax
, SIGNED
);
2123 if (real_less (&t
, x
))
2124 return immed_wide_int_const (wmax
, mode
);
2126 /* Test against the signed lower bound. */
2127 wmin
= wi::min_value (width
, SIGNED
);
2128 real_from_integer (&t
, VOIDmode
, wmin
, SIGNED
);
2129 if (real_less (x
, &t
))
2130 return immed_wide_int_const (wmin
, mode
);
2132 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
2136 if (REAL_VALUE_ISNAN (*x
) || REAL_VALUE_NEGATIVE (*x
))
2139 /* Test against the unsigned upper bound. */
2140 wmax
= wi::max_value (width
, UNSIGNED
);
2141 real_from_integer (&t
, VOIDmode
, wmax
, UNSIGNED
);
2142 if (real_less (&t
, x
))
2143 return immed_wide_int_const (wmax
, mode
);
2145 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
2153 /* Handle polynomial integers. */
2154 else if (CONST_POLY_INT_P (op
))
2156 poly_wide_int result
;
2160 result
= -const_poly_int_value (op
);
2164 result
= ~const_poly_int_value (op
);
2170 return immed_wide_int_const (result
, mode
);
2176 /* Subroutine of simplify_binary_operation to simplify a binary operation
2177 CODE that can commute with byte swapping, with result mode MODE and
2178 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2179 Return zero if no simplification or canonicalization is possible. */
2182 simplify_context::simplify_byte_swapping_operation (rtx_code code
,
2188 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2189 if (GET_CODE (op0
) == BSWAP
&& CONST_SCALAR_INT_P (op1
))
2191 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0),
2192 simplify_gen_unary (BSWAP
, mode
, op1
, mode
));
2193 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
2196 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2197 if (GET_CODE (op0
) == BSWAP
&& GET_CODE (op1
) == BSWAP
)
2199 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2200 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
2206 /* Subroutine of simplify_binary_operation to simplify a commutative,
2207 associative binary operation CODE with result mode MODE, operating
2208 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2209 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2210 canonicalization is possible. */
2213 simplify_context::simplify_associative_operation (rtx_code code
,
2219 /* Linearize the operator to the left. */
2220 if (GET_CODE (op1
) == code
)
2222 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2223 if (GET_CODE (op0
) == code
)
2225 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
2226 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
2229 /* "a op (b op c)" becomes "(b op c) op a". */
2230 if (! swap_commutative_operands_p (op1
, op0
))
2231 return simplify_gen_binary (code
, mode
, op1
, op0
);
2233 std::swap (op0
, op1
);
2236 if (GET_CODE (op0
) == code
)
2238 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2239 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
2241 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
2242 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2245 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2246 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
2248 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
2250 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2251 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
2253 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2259 /* Return a mask describing the COMPARISON. */
2261 comparison_to_mask (enum rtx_code comparison
)
2301 /* Return a comparison corresponding to the MASK. */
2302 static enum rtx_code
2303 mask_to_comparison (int mask
)
2343 /* Return true if CODE is valid for comparisons of mode MODE, false
2346 It is always safe to return false, even if the code was valid for the
2347 given mode as that will merely suppress optimizations. */
2350 comparison_code_valid_for_mode (enum rtx_code code
, enum machine_mode mode
)
2354 /* These are valid for integral, floating and vector modes. */
2361 return (INTEGRAL_MODE_P (mode
)
2362 || FLOAT_MODE_P (mode
)
2363 || VECTOR_MODE_P (mode
));
2365 /* These are valid for floating point modes. */
2374 return FLOAT_MODE_P (mode
);
2376 /* These are filtered out in simplify_logical_operation, but
2377 we check for them too as a matter of safety. They are valid
2378 for integral and vector modes. */
2383 return INTEGRAL_MODE_P (mode
) || VECTOR_MODE_P (mode
);
2390 /* Canonicalize RES, a scalar const0_rtx/const_true_rtx to the right
2391 false/true value of comparison with MODE where comparison operands
2395 relational_result (machine_mode mode
, machine_mode cmp_mode
, rtx res
)
2397 if (SCALAR_FLOAT_MODE_P (mode
))
2399 if (res
== const0_rtx
)
2400 return CONST0_RTX (mode
);
2401 #ifdef FLOAT_STORE_FLAG_VALUE
2402 REAL_VALUE_TYPE val
= FLOAT_STORE_FLAG_VALUE (mode
);
2403 return const_double_from_real_value (val
, mode
);
2408 if (VECTOR_MODE_P (mode
))
2410 if (res
== const0_rtx
)
2411 return CONST0_RTX (mode
);
2412 #ifdef VECTOR_STORE_FLAG_VALUE
2413 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
2414 if (val
== NULL_RTX
)
2416 if (val
== const1_rtx
)
2417 return CONST1_RTX (mode
);
2419 return gen_const_vec_duplicate (mode
, val
);
2424 /* For vector comparison with scalar int result, it is unknown
2425 if the target means here a comparison into an integral bitmask,
2426 or comparison where all comparisons true mean const_true_rtx
2427 whole result, or where any comparisons true mean const_true_rtx
2428 whole result. For const0_rtx all the cases are the same. */
2429 if (VECTOR_MODE_P (cmp_mode
)
2430 && SCALAR_INT_MODE_P (mode
)
2431 && res
== const_true_rtx
)
2437 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2438 and OP1, which should be both relational operations. Return 0 if no such
2439 simplification is possible. */
2441 simplify_context::simplify_logical_relational_operation (rtx_code code
,
2445 /* We only handle IOR of two relational operations. */
2449 if (!(COMPARISON_P (op0
) && COMPARISON_P (op1
)))
2452 if (!(rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2453 && rtx_equal_p (XEXP (op0
, 1), XEXP (op1
, 1))))
2456 enum rtx_code code0
= GET_CODE (op0
);
2457 enum rtx_code code1
= GET_CODE (op1
);
2459 /* We don't handle unsigned comparisons currently. */
2460 if (code0
== LTU
|| code0
== GTU
|| code0
== LEU
|| code0
== GEU
)
2462 if (code1
== LTU
|| code1
== GTU
|| code1
== LEU
|| code1
== GEU
)
2465 int mask0
= comparison_to_mask (code0
);
2466 int mask1
= comparison_to_mask (code1
);
2468 int mask
= mask0
| mask1
;
2471 return relational_result (mode
, GET_MODE (op0
), const_true_rtx
);
2473 code
= mask_to_comparison (mask
);
2475 /* Many comparison codes are only valid for certain mode classes. */
2476 if (!comparison_code_valid_for_mode (code
, mode
))
2479 op0
= XEXP (op1
, 0);
2480 op1
= XEXP (op1
, 1);
2482 return simplify_gen_relational (code
, mode
, VOIDmode
, op0
, op1
);
2485 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2486 and OP1. Return 0 if no simplification is possible.
2488 Don't use this for relational operations such as EQ or LT.
2489 Use simplify_relational_operation instead. */
2491 simplify_context::simplify_binary_operation (rtx_code code
, machine_mode mode
,
2494 rtx trueop0
, trueop1
;
2497 /* Relational operations don't work here. We must know the mode
2498 of the operands in order to do the comparison correctly.
2499 Assuming a full word can give incorrect results.
2500 Consider comparing 128 with -128 in QImode. */
2501 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
2502 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
2504 /* Make sure the constant is second. */
2505 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
2506 && swap_commutative_operands_p (op0
, op1
))
2507 std::swap (op0
, op1
);
2509 trueop0
= avoid_constant_pool_reference (op0
);
2510 trueop1
= avoid_constant_pool_reference (op1
);
2512 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
2515 tem
= simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
2520 /* If the above steps did not result in a simplification and op0 or op1
2521 were constant pool references, use the referenced constants directly. */
2522 if (trueop0
!= op0
|| trueop1
!= op1
)
2523 return simplify_gen_binary (code
, mode
, trueop0
, trueop1
);
2528 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2529 which OP0 and OP1 are both vector series or vector duplicates
2530 (which are really just series with a step of 0). If so, try to
2531 form a new series by applying CODE to the bases and to the steps.
2532 Return null if no simplification is possible.
2534 MODE is the mode of the operation and is known to be a vector
2538 simplify_context::simplify_binary_operation_series (rtx_code code
,
2543 if (vec_duplicate_p (op0
, &base0
))
2545 else if (!vec_series_p (op0
, &base0
, &step0
))
2549 if (vec_duplicate_p (op1
, &base1
))
2551 else if (!vec_series_p (op1
, &base1
, &step1
))
2554 /* Only create a new series if we can simplify both parts. In other
2555 cases this isn't really a simplification, and it's not necessarily
2556 a win to replace a vector operation with a scalar operation. */
2557 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
2558 rtx new_base
= simplify_binary_operation (code
, inner_mode
, base0
, base1
);
2562 rtx new_step
= simplify_binary_operation (code
, inner_mode
, step0
, step1
);
2566 return gen_vec_series (mode
, new_base
, new_step
);
2569 /* Subroutine of simplify_binary_operation_1. Un-distribute a binary
2570 operation CODE with result mode MODE, operating on OP0 and OP1.
2571 e.g. simplify (xor (and A C) (and (B C)) to (and (xor (A B) C).
2572 Returns NULL_RTX if no simplification is possible. */
2575 simplify_context::simplify_distributive_operation (rtx_code code
,
2579 enum rtx_code op
= GET_CODE (op0
);
2580 gcc_assert (GET_CODE (op1
) == op
);
2582 if (rtx_equal_p (XEXP (op0
, 1), XEXP (op1
, 1))
2583 && ! side_effects_p (XEXP (op0
, 1)))
2584 return simplify_gen_binary (op
, mode
,
2585 simplify_gen_binary (code
, mode
,
2590 if (GET_RTX_CLASS (op
) == RTX_COMM_ARITH
)
2592 if (rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2593 && ! side_effects_p (XEXP (op0
, 0)))
2594 return simplify_gen_binary (op
, mode
,
2595 simplify_gen_binary (code
, mode
,
2599 if (rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 1))
2600 && ! side_effects_p (XEXP (op0
, 0)))
2601 return simplify_gen_binary (op
, mode
,
2602 simplify_gen_binary (code
, mode
,
2606 if (rtx_equal_p (XEXP (op0
, 1), XEXP (op1
, 0))
2607 && ! side_effects_p (XEXP (op0
, 1)))
2608 return simplify_gen_binary (op
, mode
,
2609 simplify_gen_binary (code
, mode
,
2618 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2619 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2620 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2621 actual constants. */
2624 simplify_context::simplify_binary_operation_1 (rtx_code code
,
2627 rtx trueop0
, rtx trueop1
)
2629 rtx tem
, reversed
, opleft
, opright
, elt0
, elt1
;
2631 scalar_int_mode int_mode
, inner_mode
;
2634 /* Even if we can't compute a constant result,
2635 there are some cases worth simplifying. */
2640 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2641 when x is NaN, infinite, or finite and nonzero. They aren't
2642 when x is -0 and the rounding mode is not towards -infinity,
2643 since (-0) + 0 is then 0. */
2644 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
2647 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2648 transformations are safe even for IEEE. */
2649 if (GET_CODE (op0
) == NEG
)
2650 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
2651 else if (GET_CODE (op1
) == NEG
)
2652 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
2654 /* (~a) + 1 -> -a */
2655 if (INTEGRAL_MODE_P (mode
)
2656 && GET_CODE (op0
) == NOT
2657 && trueop1
== const1_rtx
)
2658 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
2660 /* Handle both-operands-constant cases. We can only add
2661 CONST_INTs to constants since the sum of relocatable symbols
2662 can't be handled by most assemblers. Don't add CONST_INT
2663 to CONST_INT since overflow won't be computed properly if wider
2664 than HOST_BITS_PER_WIDE_INT. */
2666 if ((GET_CODE (op0
) == CONST
2667 || GET_CODE (op0
) == SYMBOL_REF
2668 || GET_CODE (op0
) == LABEL_REF
)
2669 && poly_int_rtx_p (op1
, &offset
))
2670 return plus_constant (mode
, op0
, offset
);
2671 else if ((GET_CODE (op1
) == CONST
2672 || GET_CODE (op1
) == SYMBOL_REF
2673 || GET_CODE (op1
) == LABEL_REF
)
2674 && poly_int_rtx_p (op0
, &offset
))
2675 return plus_constant (mode
, op1
, offset
);
2677 /* See if this is something like X * C - X or vice versa or
2678 if the multiplication is written as a shift. If so, we can
2679 distribute and make a new multiply, shift, or maybe just
2680 have X (if C is 2 in the example above). But don't make
2681 something more expensive than we had before. */
2683 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2685 rtx lhs
= op0
, rhs
= op1
;
2687 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2688 wide_int coeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2690 if (GET_CODE (lhs
) == NEG
)
2692 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2693 lhs
= XEXP (lhs
, 0);
2695 else if (GET_CODE (lhs
) == MULT
2696 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2698 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2699 lhs
= XEXP (lhs
, 0);
2701 else if (GET_CODE (lhs
) == ASHIFT
2702 && CONST_INT_P (XEXP (lhs
, 1))
2703 && INTVAL (XEXP (lhs
, 1)) >= 0
2704 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2706 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2707 GET_MODE_PRECISION (int_mode
));
2708 lhs
= XEXP (lhs
, 0);
2711 if (GET_CODE (rhs
) == NEG
)
2713 coeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2714 rhs
= XEXP (rhs
, 0);
2716 else if (GET_CODE (rhs
) == MULT
2717 && CONST_INT_P (XEXP (rhs
, 1)))
2719 coeff1
= rtx_mode_t (XEXP (rhs
, 1), int_mode
);
2720 rhs
= XEXP (rhs
, 0);
2722 else if (GET_CODE (rhs
) == ASHIFT
2723 && CONST_INT_P (XEXP (rhs
, 1))
2724 && INTVAL (XEXP (rhs
, 1)) >= 0
2725 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2727 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2728 GET_MODE_PRECISION (int_mode
));
2729 rhs
= XEXP (rhs
, 0);
2732 if (rtx_equal_p (lhs
, rhs
))
2734 rtx orig
= gen_rtx_PLUS (int_mode
, op0
, op1
);
2736 bool speed
= optimize_function_for_speed_p (cfun
);
2738 coeff
= immed_wide_int_const (coeff0
+ coeff1
, int_mode
);
2740 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2741 return (set_src_cost (tem
, int_mode
, speed
)
2742 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2745 /* Optimize (X - 1) * Y + Y to X * Y. */
2748 if (GET_CODE (op0
) == MULT
)
2750 if (((GET_CODE (XEXP (op0
, 0)) == PLUS
2751 && XEXP (XEXP (op0
, 0), 1) == constm1_rtx
)
2752 || (GET_CODE (XEXP (op0
, 0)) == MINUS
2753 && XEXP (XEXP (op0
, 0), 1) == const1_rtx
))
2754 && rtx_equal_p (XEXP (op0
, 1), op1
))
2755 lhs
= XEXP (XEXP (op0
, 0), 0);
2756 else if (((GET_CODE (XEXP (op0
, 1)) == PLUS
2757 && XEXP (XEXP (op0
, 1), 1) == constm1_rtx
)
2758 || (GET_CODE (XEXP (op0
, 1)) == MINUS
2759 && XEXP (XEXP (op0
, 1), 1) == const1_rtx
))
2760 && rtx_equal_p (XEXP (op0
, 0), op1
))
2761 lhs
= XEXP (XEXP (op0
, 1), 0);
2763 else if (GET_CODE (op1
) == MULT
)
2765 if (((GET_CODE (XEXP (op1
, 0)) == PLUS
2766 && XEXP (XEXP (op1
, 0), 1) == constm1_rtx
)
2767 || (GET_CODE (XEXP (op1
, 0)) == MINUS
2768 && XEXP (XEXP (op1
, 0), 1) == const1_rtx
))
2769 && rtx_equal_p (XEXP (op1
, 1), op0
))
2770 rhs
= XEXP (XEXP (op1
, 0), 0);
2771 else if (((GET_CODE (XEXP (op1
, 1)) == PLUS
2772 && XEXP (XEXP (op1
, 1), 1) == constm1_rtx
)
2773 || (GET_CODE (XEXP (op1
, 1)) == MINUS
2774 && XEXP (XEXP (op1
, 1), 1) == const1_rtx
))
2775 && rtx_equal_p (XEXP (op1
, 0), op0
))
2776 rhs
= XEXP (XEXP (op1
, 1), 0);
2778 if (lhs
!= op0
|| rhs
!= op1
)
2779 return simplify_gen_binary (MULT
, int_mode
, lhs
, rhs
);
2782 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2783 if (CONST_SCALAR_INT_P (op1
)
2784 && GET_CODE (op0
) == XOR
2785 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2786 && mode_signbit_p (mode
, op1
))
2787 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2788 simplify_gen_binary (XOR
, mode
, op1
,
2791 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2792 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2793 && GET_CODE (op0
) == MULT
2794 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2798 in1
= XEXP (XEXP (op0
, 0), 0);
2799 in2
= XEXP (op0
, 1);
2800 return simplify_gen_binary (MINUS
, mode
, op1
,
2801 simplify_gen_binary (MULT
, mode
,
2805 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2806 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2808 if (COMPARISON_P (op0
)
2809 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2810 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2811 && (reversed
= reversed_comparison (op0
, mode
)))
2813 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2815 /* If one of the operands is a PLUS or a MINUS, see if we can
2816 simplify this by the associative law.
2817 Don't use the associative law for floating point.
2818 The inaccuracy makes it nonassociative,
2819 and subtle programs can break if operations are associated. */
2821 if (INTEGRAL_MODE_P (mode
)
2822 && (plus_minus_operand_p (op0
)
2823 || plus_minus_operand_p (op1
))
2824 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2827 /* Reassociate floating point addition only when the user
2828 specifies associative math operations. */
2829 if (FLOAT_MODE_P (mode
)
2830 && flag_associative_math
)
2832 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2837 /* Handle vector series. */
2838 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2840 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2847 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2848 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2849 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2850 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2852 rtx xop00
= XEXP (op0
, 0);
2853 rtx xop10
= XEXP (op1
, 0);
2855 if (REG_P (xop00
) && REG_P (xop10
)
2856 && REGNO (xop00
) == REGNO (xop10
)
2857 && GET_MODE (xop00
) == mode
2858 && GET_MODE (xop10
) == mode
2859 && GET_MODE_CLASS (mode
) == MODE_CC
)
2865 /* We can't assume x-x is 0 even with non-IEEE floating point,
2866 but since it is zero except in very strange circumstances, we
2867 will treat it as zero with -ffinite-math-only. */
2868 if (rtx_equal_p (trueop0
, trueop1
)
2869 && ! side_effects_p (op0
)
2870 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2871 return CONST0_RTX (mode
);
2873 /* Change subtraction from zero into negation. (0 - x) is the
2874 same as -x when x is NaN, infinite, or finite and nonzero.
2875 But if the mode has signed zeros, and does not round towards
2876 -infinity, then 0 - 0 is 0, not -0. */
2877 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2878 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2880 /* (-1 - a) is ~a, unless the expression contains symbolic
2881 constants, in which case not retaining additions and
2882 subtractions could cause invalid assembly to be produced. */
2883 if (trueop0
== constm1_rtx
2884 && !contains_symbolic_reference_p (op1
))
2885 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2887 /* Subtracting 0 has no effect unless the mode has signalling NaNs,
2888 or has signed zeros and supports rounding towards -infinity.
2889 In such a case, 0 - 0 is -0. */
2890 if (!(HONOR_SIGNED_ZEROS (mode
)
2891 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2892 && !HONOR_SNANS (mode
)
2893 && trueop1
== CONST0_RTX (mode
))
2896 /* See if this is something like X * C - X or vice versa or
2897 if the multiplication is written as a shift. If so, we can
2898 distribute and make a new multiply, shift, or maybe just
2899 have X (if C is 2 in the example above). But don't make
2900 something more expensive than we had before. */
2902 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2904 rtx lhs
= op0
, rhs
= op1
;
2906 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2907 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2909 if (GET_CODE (lhs
) == NEG
)
2911 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2912 lhs
= XEXP (lhs
, 0);
2914 else if (GET_CODE (lhs
) == MULT
2915 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2917 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2918 lhs
= XEXP (lhs
, 0);
2920 else if (GET_CODE (lhs
) == ASHIFT
2921 && CONST_INT_P (XEXP (lhs
, 1))
2922 && INTVAL (XEXP (lhs
, 1)) >= 0
2923 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2925 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2926 GET_MODE_PRECISION (int_mode
));
2927 lhs
= XEXP (lhs
, 0);
2930 if (GET_CODE (rhs
) == NEG
)
2932 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2933 rhs
= XEXP (rhs
, 0);
2935 else if (GET_CODE (rhs
) == MULT
2936 && CONST_INT_P (XEXP (rhs
, 1)))
2938 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2939 rhs
= XEXP (rhs
, 0);
2941 else if (GET_CODE (rhs
) == ASHIFT
2942 && CONST_INT_P (XEXP (rhs
, 1))
2943 && INTVAL (XEXP (rhs
, 1)) >= 0
2944 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2946 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2947 GET_MODE_PRECISION (int_mode
));
2948 negcoeff1
= -negcoeff1
;
2949 rhs
= XEXP (rhs
, 0);
2952 if (rtx_equal_p (lhs
, rhs
))
2954 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2956 bool speed
= optimize_function_for_speed_p (cfun
);
2958 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2960 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2961 return (set_src_cost (tem
, int_mode
, speed
)
2962 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2965 /* Optimize (X + 1) * Y - Y to X * Y. */
2967 if (GET_CODE (op0
) == MULT
)
2969 if (((GET_CODE (XEXP (op0
, 0)) == PLUS
2970 && XEXP (XEXP (op0
, 0), 1) == const1_rtx
)
2971 || (GET_CODE (XEXP (op0
, 0)) == MINUS
2972 && XEXP (XEXP (op0
, 0), 1) == constm1_rtx
))
2973 && rtx_equal_p (XEXP (op0
, 1), op1
))
2974 lhs
= XEXP (XEXP (op0
, 0), 0);
2975 else if (((GET_CODE (XEXP (op0
, 1)) == PLUS
2976 && XEXP (XEXP (op0
, 1), 1) == const1_rtx
)
2977 || (GET_CODE (XEXP (op0
, 1)) == MINUS
2978 && XEXP (XEXP (op0
, 1), 1) == constm1_rtx
))
2979 && rtx_equal_p (XEXP (op0
, 0), op1
))
2980 lhs
= XEXP (XEXP (op0
, 1), 0);
2983 return simplify_gen_binary (MULT
, int_mode
, lhs
, op1
);
2986 /* (a - (-b)) -> (a + b). True even for IEEE. */
2987 if (GET_CODE (op1
) == NEG
)
2988 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2990 /* (-x - c) may be simplified as (-c - x). */
2991 if (GET_CODE (op0
) == NEG
2992 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2994 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2996 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2999 if ((GET_CODE (op0
) == CONST
3000 || GET_CODE (op0
) == SYMBOL_REF
3001 || GET_CODE (op0
) == LABEL_REF
)
3002 && poly_int_rtx_p (op1
, &offset
))
3003 return plus_constant (mode
, op0
, trunc_int_for_mode (-offset
, mode
));
3005 /* Don't let a relocatable value get a negative coeff. */
3006 if (poly_int_rtx_p (op1
) && GET_MODE (op0
) != VOIDmode
)
3007 return simplify_gen_binary (PLUS
, mode
,
3009 neg_poly_int_rtx (mode
, op1
));
3011 /* (x - (x & y)) -> (x & ~y) */
3012 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
3014 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
3016 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
3017 GET_MODE (XEXP (op1
, 1)));
3018 return simplify_gen_binary (AND
, mode
, op0
, tem
);
3020 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
3022 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
3023 GET_MODE (XEXP (op1
, 0)));
3024 return simplify_gen_binary (AND
, mode
, op0
, tem
);
3028 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
3029 by reversing the comparison code if valid. */
3030 if (STORE_FLAG_VALUE
== 1
3031 && trueop0
== const1_rtx
3032 && COMPARISON_P (op1
)
3033 && (reversed
= reversed_comparison (op1
, mode
)))
3036 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
3037 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
3038 && GET_CODE (op1
) == MULT
3039 && GET_CODE (XEXP (op1
, 0)) == NEG
)
3043 in1
= XEXP (XEXP (op1
, 0), 0);
3044 in2
= XEXP (op1
, 1);
3045 return simplify_gen_binary (PLUS
, mode
,
3046 simplify_gen_binary (MULT
, mode
,
3051 /* Canonicalize (minus (neg A) (mult B C)) to
3052 (minus (mult (neg B) C) A). */
3053 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
3054 && GET_CODE (op1
) == MULT
3055 && GET_CODE (op0
) == NEG
)
3059 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
3060 in2
= XEXP (op1
, 1);
3061 return simplify_gen_binary (MINUS
, mode
,
3062 simplify_gen_binary (MULT
, mode
,
3067 /* If one of the operands is a PLUS or a MINUS, see if we can
3068 simplify this by the associative law. This will, for example,
3069 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
3070 Don't use the associative law for floating point.
3071 The inaccuracy makes it nonassociative,
3072 and subtle programs can break if operations are associated. */
3074 if (INTEGRAL_MODE_P (mode
)
3075 && (plus_minus_operand_p (op0
)
3076 || plus_minus_operand_p (op1
))
3077 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
3080 /* Handle vector series. */
3081 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
3083 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
3090 if (trueop1
== constm1_rtx
)
3091 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3093 if (GET_CODE (op0
) == NEG
)
3095 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
3096 /* If op1 is a MULT as well and simplify_unary_operation
3097 just moved the NEG to the second operand, simplify_gen_binary
3098 below could through simplify_associative_operation move
3099 the NEG around again and recurse endlessly. */
3101 && GET_CODE (op1
) == MULT
3102 && GET_CODE (temp
) == MULT
3103 && XEXP (op1
, 0) == XEXP (temp
, 0)
3104 && GET_CODE (XEXP (temp
, 1)) == NEG
3105 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
3108 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
3110 if (GET_CODE (op1
) == NEG
)
3112 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3113 /* If op0 is a MULT as well and simplify_unary_operation
3114 just moved the NEG to the second operand, simplify_gen_binary
3115 below could through simplify_associative_operation move
3116 the NEG around again and recurse endlessly. */
3118 && GET_CODE (op0
) == MULT
3119 && GET_CODE (temp
) == MULT
3120 && XEXP (op0
, 0) == XEXP (temp
, 0)
3121 && GET_CODE (XEXP (temp
, 1)) == NEG
3122 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
3125 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
3128 /* Maybe simplify x * 0 to 0. The reduction is not valid if
3129 x is NaN, since x * 0 is then also NaN. Nor is it valid
3130 when the mode has signed zeros, since multiplying a negative
3131 number by 0 will give -0, not 0. */
3132 if (!HONOR_NANS (mode
)
3133 && !HONOR_SIGNED_ZEROS (mode
)
3134 && trueop1
== CONST0_RTX (mode
)
3135 && ! side_effects_p (op0
))
3138 /* In IEEE floating point, x*1 is not equivalent to x for
3140 if (!HONOR_SNANS (mode
)
3141 && trueop1
== CONST1_RTX (mode
))
3144 /* Convert multiply by constant power of two into shift. */
3145 if (mem_depth
== 0 && CONST_SCALAR_INT_P (trueop1
))
3147 val
= wi::exact_log2 (rtx_mode_t (trueop1
, mode
));
3149 return simplify_gen_binary (ASHIFT
, mode
, op0
,
3150 gen_int_shift_amount (mode
, val
));
3153 /* x*2 is x+x and x*(-1) is -x */
3154 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3155 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
3156 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
3157 && GET_MODE (op0
) == mode
)
3159 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3161 if (real_equal (d1
, &dconst2
))
3162 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
3164 if (!HONOR_SNANS (mode
)
3165 && real_equal (d1
, &dconstm1
))
3166 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3169 /* Optimize -x * -x as x * x. */
3170 if (FLOAT_MODE_P (mode
)
3171 && GET_CODE (op0
) == NEG
3172 && GET_CODE (op1
) == NEG
3173 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
3174 && !side_effects_p (XEXP (op0
, 0)))
3175 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
3177 /* Likewise, optimize abs(x) * abs(x) as x * x. */
3178 if (SCALAR_FLOAT_MODE_P (mode
)
3179 && GET_CODE (op0
) == ABS
3180 && GET_CODE (op1
) == ABS
3181 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
3182 && !side_effects_p (XEXP (op0
, 0)))
3183 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
3185 /* Reassociate multiplication, but for floating point MULTs
3186 only when the user specifies unsafe math optimizations. */
3187 if (! FLOAT_MODE_P (mode
)
3188 || flag_unsafe_math_optimizations
)
3190 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3197 if (trueop1
== CONST0_RTX (mode
))
3199 if (INTEGRAL_MODE_P (mode
)
3200 && trueop1
== CONSTM1_RTX (mode
)
3201 && !side_effects_p (op0
))
3203 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3205 /* A | (~A) -> -1 */
3206 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3207 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3208 && ! side_effects_p (op0
)
3209 && SCALAR_INT_MODE_P (mode
))
3212 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
3213 if (CONST_INT_P (op1
)
3214 && HWI_COMPUTABLE_MODE_P (mode
)
3215 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
3216 && !side_effects_p (op0
))
3219 /* Canonicalize (X & C1) | C2. */
3220 if (GET_CODE (op0
) == AND
3221 && CONST_INT_P (trueop1
)
3222 && CONST_INT_P (XEXP (op0
, 1)))
3224 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
3225 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
3226 HOST_WIDE_INT c2
= INTVAL (trueop1
);
3228 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
3230 && !side_effects_p (XEXP (op0
, 0)))
3233 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
3234 if (((c1
|c2
) & mask
) == mask
)
3235 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
3238 /* Convert (A & B) | A to A. */
3239 if (GET_CODE (op0
) == AND
3240 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3241 || rtx_equal_p (XEXP (op0
, 1), op1
))
3242 && ! side_effects_p (XEXP (op0
, 0))
3243 && ! side_effects_p (XEXP (op0
, 1)))
3246 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
3247 mode size to (rotate A CX). */
3249 if (GET_CODE (op1
) == ASHIFT
3250 || GET_CODE (op1
) == SUBREG
)
3261 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
3262 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
3263 && CONST_INT_P (XEXP (opleft
, 1))
3264 && CONST_INT_P (XEXP (opright
, 1))
3265 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
3266 == GET_MODE_UNIT_PRECISION (mode
)))
3267 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
3269 /* Same, but for ashift that has been "simplified" to a wider mode
3270 by simplify_shift_const. */
3272 if (GET_CODE (opleft
) == SUBREG
3273 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3274 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (opleft
)),
3276 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
3277 && GET_CODE (opright
) == LSHIFTRT
3278 && GET_CODE (XEXP (opright
, 0)) == SUBREG
3279 && known_eq (SUBREG_BYTE (opleft
), SUBREG_BYTE (XEXP (opright
, 0)))
3280 && GET_MODE_SIZE (int_mode
) < GET_MODE_SIZE (inner_mode
)
3281 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
3282 SUBREG_REG (XEXP (opright
, 0)))
3283 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
3284 && CONST_INT_P (XEXP (opright
, 1))
3285 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1))
3286 + INTVAL (XEXP (opright
, 1))
3287 == GET_MODE_PRECISION (int_mode
)))
3288 return gen_rtx_ROTATE (int_mode
, XEXP (opright
, 0),
3289 XEXP (SUBREG_REG (opleft
), 1));
3291 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
3292 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
3293 the PLUS does not affect any of the bits in OP1: then we can do
3294 the IOR as a PLUS and we can associate. This is valid if OP1
3295 can be safely shifted left C bits. */
3296 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
3297 && GET_CODE (XEXP (op0
, 0)) == PLUS
3298 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
3299 && CONST_INT_P (XEXP (op0
, 1))
3300 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
3302 int count
= INTVAL (XEXP (op0
, 1));
3303 HOST_WIDE_INT mask
= UINTVAL (trueop1
) << count
;
3305 if (mask
>> count
== INTVAL (trueop1
)
3306 && trunc_int_for_mode (mask
, mode
) == mask
3307 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
3308 return simplify_gen_binary (ASHIFTRT
, mode
,
3309 plus_constant (mode
, XEXP (op0
, 0),
3314 /* The following happens with bitfield merging.
3315 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
3316 if (GET_CODE (op0
) == AND
3317 && GET_CODE (op1
) == AND
3318 && CONST_INT_P (XEXP (op0
, 1))
3319 && CONST_INT_P (XEXP (op1
, 1))
3320 && (INTVAL (XEXP (op0
, 1))
3321 == ~INTVAL (XEXP (op1
, 1))))
3323 /* The IOR may be on both sides. */
3324 rtx top0
= NULL_RTX
, top1
= NULL_RTX
;
3325 if (GET_CODE (XEXP (op1
, 0)) == IOR
)
3326 top0
= op0
, top1
= op1
;
3327 else if (GET_CODE (XEXP (op0
, 0)) == IOR
)
3328 top0
= op1
, top1
= op0
;
3331 /* X may be on either side of the inner IOR. */
3333 if (rtx_equal_p (XEXP (top0
, 0),
3334 XEXP (XEXP (top1
, 0), 0)))
3335 tem
= XEXP (XEXP (top1
, 0), 1);
3336 else if (rtx_equal_p (XEXP (top0
, 0),
3337 XEXP (XEXP (top1
, 0), 1)))
3338 tem
= XEXP (XEXP (top1
, 0), 0);
3340 return simplify_gen_binary (IOR
, mode
, XEXP (top0
, 0),
3342 (AND
, mode
, tem
, XEXP (top1
, 1)));
3346 /* Convert (ior (and A C) (and B C)) into (and (ior A B) C). */
3347 if (GET_CODE (op0
) == GET_CODE (op1
)
3348 && (GET_CODE (op0
) == AND
3349 || GET_CODE (op0
) == IOR
3350 || GET_CODE (op0
) == LSHIFTRT
3351 || GET_CODE (op0
) == ASHIFTRT
3352 || GET_CODE (op0
) == ASHIFT
3353 || GET_CODE (op0
) == ROTATE
3354 || GET_CODE (op0
) == ROTATERT
))
3356 tem
= simplify_distributive_operation (code
, mode
, op0
, op1
);
3361 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3365 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3369 tem
= simplify_logical_relational_operation (code
, mode
, op0
, op1
);
3375 if (trueop1
== CONST0_RTX (mode
))
3377 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3378 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
3379 if (rtx_equal_p (trueop0
, trueop1
)
3380 && ! side_effects_p (op0
)
3381 && GET_MODE_CLASS (mode
) != MODE_CC
)
3382 return CONST0_RTX (mode
);
3384 /* Canonicalize XOR of the most significant bit to PLUS. */
3385 if (CONST_SCALAR_INT_P (op1
)
3386 && mode_signbit_p (mode
, op1
))
3387 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
3388 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
3389 if (CONST_SCALAR_INT_P (op1
)
3390 && GET_CODE (op0
) == PLUS
3391 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
3392 && mode_signbit_p (mode
, XEXP (op0
, 1)))
3393 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
3394 simplify_gen_binary (XOR
, mode
, op1
,
3397 /* If we are XORing two things that have no bits in common,
3398 convert them into an IOR. This helps to detect rotation encoded
3399 using those methods and possibly other simplifications. */
3401 if (HWI_COMPUTABLE_MODE_P (mode
)
3402 && (nonzero_bits (op0
, mode
)
3403 & nonzero_bits (op1
, mode
)) == 0)
3404 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
3406 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3407 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3410 int num_negated
= 0;
3412 if (GET_CODE (op0
) == NOT
)
3413 num_negated
++, op0
= XEXP (op0
, 0);
3414 if (GET_CODE (op1
) == NOT
)
3415 num_negated
++, op1
= XEXP (op1
, 0);
3417 if (num_negated
== 2)
3418 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
3419 else if (num_negated
== 1)
3420 return simplify_gen_unary (NOT
, mode
,
3421 simplify_gen_binary (XOR
, mode
, op0
, op1
),
3425 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
3426 correspond to a machine insn or result in further simplifications
3427 if B is a constant. */
3429 if (GET_CODE (op0
) == AND
3430 && rtx_equal_p (XEXP (op0
, 1), op1
)
3431 && ! side_effects_p (op1
))
3432 return simplify_gen_binary (AND
, mode
,
3433 simplify_gen_unary (NOT
, mode
,
3434 XEXP (op0
, 0), mode
),
3437 else if (GET_CODE (op0
) == AND
3438 && rtx_equal_p (XEXP (op0
, 0), op1
)
3439 && ! side_effects_p (op1
))
3440 return simplify_gen_binary (AND
, mode
,
3441 simplify_gen_unary (NOT
, mode
,
3442 XEXP (op0
, 1), mode
),
3445 /* Given (xor (ior (xor A B) C) D), where B, C and D are
3446 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3447 out bits inverted twice and not set by C. Similarly, given
3448 (xor (and (xor A B) C) D), simplify without inverting C in
3449 the xor operand: (xor (and A C) (B&C)^D).
3451 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
3452 && GET_CODE (XEXP (op0
, 0)) == XOR
3453 && CONST_INT_P (op1
)
3454 && CONST_INT_P (XEXP (op0
, 1))
3455 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
3457 enum rtx_code op
= GET_CODE (op0
);
3458 rtx a
= XEXP (XEXP (op0
, 0), 0);
3459 rtx b
= XEXP (XEXP (op0
, 0), 1);
3460 rtx c
= XEXP (op0
, 1);
3462 HOST_WIDE_INT bval
= INTVAL (b
);
3463 HOST_WIDE_INT cval
= INTVAL (c
);
3464 HOST_WIDE_INT dval
= INTVAL (d
);
3465 HOST_WIDE_INT xcval
;
3472 return simplify_gen_binary (XOR
, mode
,
3473 simplify_gen_binary (op
, mode
, a
, c
),
3474 gen_int_mode ((bval
& xcval
) ^ dval
,
3478 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3479 we can transform like this:
3480 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3481 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3482 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3483 Attempt a few simplifications when B and C are both constants. */
3484 if (GET_CODE (op0
) == AND
3485 && CONST_INT_P (op1
)
3486 && CONST_INT_P (XEXP (op0
, 1)))
3488 rtx a
= XEXP (op0
, 0);
3489 rtx b
= XEXP (op0
, 1);
3491 HOST_WIDE_INT bval
= INTVAL (b
);
3492 HOST_WIDE_INT cval
= INTVAL (c
);
3494 /* Instead of computing ~A&C, we compute its negated value,
3495 ~(A|~C). If it yields -1, ~A&C is zero, so we can
3496 optimize for sure. If it does not simplify, we still try
3497 to compute ~A&C below, but since that always allocates
3498 RTL, we don't try that before committing to returning a
3499 simplified expression. */
3500 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
3503 if ((~cval
& bval
) == 0)
3505 rtx na_c
= NULL_RTX
;
3507 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
3510 /* If ~A does not simplify, don't bother: we don't
3511 want to simplify 2 operations into 3, and if na_c
3512 were to simplify with na, n_na_c would have
3513 simplified as well. */
3514 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
3516 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
3519 /* Try to simplify ~A&C | ~B&C. */
3520 if (na_c
!= NULL_RTX
)
3521 return simplify_gen_binary (IOR
, mode
, na_c
,
3522 gen_int_mode (~bval
& cval
, mode
));
3526 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3527 if (n_na_c
== CONSTM1_RTX (mode
))
3529 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
3530 gen_int_mode (~cval
& bval
,
3532 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
3533 gen_int_mode (~bval
& cval
,
3539 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3540 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3541 machines, and also has shorter instruction path length. */
3542 if (GET_CODE (op0
) == AND
3543 && GET_CODE (XEXP (op0
, 0)) == XOR
3544 && CONST_INT_P (XEXP (op0
, 1))
3545 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), trueop1
))
3548 rtx b
= XEXP (XEXP (op0
, 0), 1);
3549 rtx c
= XEXP (op0
, 1);
3550 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3551 rtx a_nc
= simplify_gen_binary (AND
, mode
, a
, nc
);
3552 rtx bc
= simplify_gen_binary (AND
, mode
, b
, c
);
3553 return simplify_gen_binary (IOR
, mode
, a_nc
, bc
);
3555 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3556 else if (GET_CODE (op0
) == AND
3557 && GET_CODE (XEXP (op0
, 0)) == XOR
3558 && CONST_INT_P (XEXP (op0
, 1))
3559 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), trueop1
))
3561 rtx a
= XEXP (XEXP (op0
, 0), 0);
3563 rtx c
= XEXP (op0
, 1);
3564 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3565 rtx b_nc
= simplify_gen_binary (AND
, mode
, b
, nc
);
3566 rtx ac
= simplify_gen_binary (AND
, mode
, a
, c
);
3567 return simplify_gen_binary (IOR
, mode
, ac
, b_nc
);
3570 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3571 comparison if STORE_FLAG_VALUE is 1. */
3572 if (STORE_FLAG_VALUE
== 1
3573 && trueop1
== const1_rtx
3574 && COMPARISON_P (op0
)
3575 && (reversed
= reversed_comparison (op0
, mode
)))
3578 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3579 is (lt foo (const_int 0)), so we can perform the above
3580 simplification if STORE_FLAG_VALUE is 1. */
3582 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3583 && STORE_FLAG_VALUE
== 1
3584 && trueop1
== const1_rtx
3585 && GET_CODE (op0
) == LSHIFTRT
3586 && CONST_INT_P (XEXP (op0
, 1))
3587 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
3588 return gen_rtx_GE (int_mode
, XEXP (op0
, 0), const0_rtx
);
3590 /* (xor (comparison foo bar) (const_int sign-bit))
3591 when STORE_FLAG_VALUE is the sign bit. */
3592 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3593 && val_signbit_p (int_mode
, STORE_FLAG_VALUE
)
3594 && trueop1
== const_true_rtx
3595 && COMPARISON_P (op0
)
3596 && (reversed
= reversed_comparison (op0
, int_mode
)))
3599 /* Convert (xor (and A C) (and B C)) into (and (xor A B) C). */
3600 if (GET_CODE (op0
) == GET_CODE (op1
)
3601 && (GET_CODE (op0
) == AND
3602 || GET_CODE (op0
) == LSHIFTRT
3603 || GET_CODE (op0
) == ASHIFTRT
3604 || GET_CODE (op0
) == ASHIFT
3605 || GET_CODE (op0
) == ROTATE
3606 || GET_CODE (op0
) == ROTATERT
))
3608 tem
= simplify_distributive_operation (code
, mode
, op0
, op1
);
3613 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3617 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3623 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3625 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3627 if (HWI_COMPUTABLE_MODE_P (mode
))
3629 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3630 HOST_WIDE_INT nzop1
;
3631 if (CONST_INT_P (trueop1
))
3633 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3634 /* If we are turning off bits already known off in OP0, we need
3636 if ((nzop0
& ~val1
) == 0)
3639 nzop1
= nonzero_bits (trueop1
, mode
);
3640 /* If we are clearing all the nonzero bits, the result is zero. */
3641 if ((nzop1
& nzop0
) == 0
3642 && !side_effects_p (op0
) && !side_effects_p (op1
))
3643 return CONST0_RTX (mode
);
3645 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3646 && GET_MODE_CLASS (mode
) != MODE_CC
)
3649 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3650 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3651 && ! side_effects_p (op0
)
3652 && GET_MODE_CLASS (mode
) != MODE_CC
)
3653 return CONST0_RTX (mode
);
3655 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3656 there are no nonzero bits of C outside of X's mode. */
3657 if ((GET_CODE (op0
) == SIGN_EXTEND
3658 || GET_CODE (op0
) == ZERO_EXTEND
)
3659 && CONST_INT_P (trueop1
)
3660 && HWI_COMPUTABLE_MODE_P (mode
)
3661 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3662 & UINTVAL (trueop1
)) == 0)
3664 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3665 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3666 gen_int_mode (INTVAL (trueop1
),
3668 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3671 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3672 we might be able to further simplify the AND with X and potentially
3673 remove the truncation altogether. */
3674 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3676 rtx x
= XEXP (op0
, 0);
3677 machine_mode xmode
= GET_MODE (x
);
3678 tem
= simplify_gen_binary (AND
, xmode
, x
,
3679 gen_int_mode (INTVAL (trueop1
), xmode
));
3680 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3683 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3684 if (GET_CODE (op0
) == IOR
3685 && CONST_INT_P (trueop1
)
3686 && CONST_INT_P (XEXP (op0
, 1)))
3688 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3689 return simplify_gen_binary (IOR
, mode
,
3690 simplify_gen_binary (AND
, mode
,
3691 XEXP (op0
, 0), op1
),
3692 gen_int_mode (tmp
, mode
));
3695 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3696 insn (and may simplify more). */
3697 if (GET_CODE (op0
) == XOR
3698 && rtx_equal_p (XEXP (op0
, 0), op1
)
3699 && ! side_effects_p (op1
))
3700 return simplify_gen_binary (AND
, mode
,
3701 simplify_gen_unary (NOT
, mode
,
3702 XEXP (op0
, 1), mode
),
3705 if (GET_CODE (op0
) == XOR
3706 && rtx_equal_p (XEXP (op0
, 1), op1
)
3707 && ! side_effects_p (op1
))
3708 return simplify_gen_binary (AND
, mode
,
3709 simplify_gen_unary (NOT
, mode
,
3710 XEXP (op0
, 0), mode
),
3713 /* Similarly for (~(A ^ B)) & A. */
3714 if (GET_CODE (op0
) == NOT
3715 && GET_CODE (XEXP (op0
, 0)) == XOR
3716 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3717 && ! side_effects_p (op1
))
3718 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3720 if (GET_CODE (op0
) == NOT
3721 && GET_CODE (XEXP (op0
, 0)) == XOR
3722 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3723 && ! side_effects_p (op1
))
3724 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3726 /* Convert (A | B) & A to A. */
3727 if (GET_CODE (op0
) == IOR
3728 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3729 || rtx_equal_p (XEXP (op0
, 1), op1
))
3730 && ! side_effects_p (XEXP (op0
, 0))
3731 && ! side_effects_p (XEXP (op0
, 1)))
3734 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3735 ((A & N) + B) & M -> (A + B) & M
3736 Similarly if (N & M) == 0,
3737 ((A | N) + B) & M -> (A + B) & M
3738 and for - instead of + and/or ^ instead of |.
3739 Also, if (N & M) == 0, then
3740 (A +- N) & M -> A & M. */
3741 if (CONST_INT_P (trueop1
)
3742 && HWI_COMPUTABLE_MODE_P (mode
)
3743 && ~UINTVAL (trueop1
)
3744 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3745 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3750 pmop
[0] = XEXP (op0
, 0);
3751 pmop
[1] = XEXP (op0
, 1);
3753 if (CONST_INT_P (pmop
[1])
3754 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3755 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3757 for (which
= 0; which
< 2; which
++)
3760 switch (GET_CODE (tem
))
3763 if (CONST_INT_P (XEXP (tem
, 1))
3764 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3765 == UINTVAL (trueop1
))
3766 pmop
[which
] = XEXP (tem
, 0);
3770 if (CONST_INT_P (XEXP (tem
, 1))
3771 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3772 pmop
[which
] = XEXP (tem
, 0);
3779 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3781 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3783 return simplify_gen_binary (code
, mode
, tem
, op1
);
3787 /* (and X (ior (not X) Y) -> (and X Y) */
3788 if (GET_CODE (op1
) == IOR
3789 && GET_CODE (XEXP (op1
, 0)) == NOT
3790 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3791 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3793 /* (and (ior (not X) Y) X) -> (and X Y) */
3794 if (GET_CODE (op0
) == IOR
3795 && GET_CODE (XEXP (op0
, 0)) == NOT
3796 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3797 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3799 /* (and X (ior Y (not X)) -> (and X Y) */
3800 if (GET_CODE (op1
) == IOR
3801 && GET_CODE (XEXP (op1
, 1)) == NOT
3802 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3803 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3805 /* (and (ior Y (not X)) X) -> (and X Y) */
3806 if (GET_CODE (op0
) == IOR
3807 && GET_CODE (XEXP (op0
, 1)) == NOT
3808 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3809 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3811 /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C). */
3812 if (GET_CODE (op0
) == GET_CODE (op1
)
3813 && (GET_CODE (op0
) == AND
3814 || GET_CODE (op0
) == IOR
3815 || GET_CODE (op0
) == LSHIFTRT
3816 || GET_CODE (op0
) == ASHIFTRT
3817 || GET_CODE (op0
) == ASHIFT
3818 || GET_CODE (op0
) == ROTATE
3819 || GET_CODE (op0
) == ROTATERT
))
3821 tem
= simplify_distributive_operation (code
, mode
, op0
, op1
);
3826 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3830 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3836 /* 0/x is 0 (or x&0 if x has side-effects). */
3837 if (trueop0
== CONST0_RTX (mode
)
3838 && !cfun
->can_throw_non_call_exceptions
)
3840 if (side_effects_p (op1
))
3841 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3845 if (trueop1
== CONST1_RTX (mode
))
3847 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3851 /* Convert divide by power of two into shift. */
3852 if (CONST_INT_P (trueop1
)
3853 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3854 return simplify_gen_binary (LSHIFTRT
, mode
, op0
,
3855 gen_int_shift_amount (mode
, val
));
3859 /* Handle floating point and integers separately. */
3860 if (SCALAR_FLOAT_MODE_P (mode
))
3862 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3863 safe for modes with NaNs, since 0.0 / 0.0 will then be
3864 NaN rather than 0.0. Nor is it safe for modes with signed
3865 zeros, since dividing 0 by a negative number gives -0.0 */
3866 if (trueop0
== CONST0_RTX (mode
)
3867 && !HONOR_NANS (mode
)
3868 && !HONOR_SIGNED_ZEROS (mode
)
3869 && ! side_effects_p (op1
))
3872 if (trueop1
== CONST1_RTX (mode
)
3873 && !HONOR_SNANS (mode
))
3876 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3877 && trueop1
!= CONST0_RTX (mode
))
3879 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3882 if (real_equal (d1
, &dconstm1
)
3883 && !HONOR_SNANS (mode
))
3884 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3886 /* Change FP division by a constant into multiplication.
3887 Only do this with -freciprocal-math. */
3888 if (flag_reciprocal_math
3889 && !real_equal (d1
, &dconst0
))
3892 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3893 tem
= const_double_from_real_value (d
, mode
);
3894 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3898 else if (SCALAR_INT_MODE_P (mode
))
3900 /* 0/x is 0 (or x&0 if x has side-effects). */
3901 if (trueop0
== CONST0_RTX (mode
)
3902 && !cfun
->can_throw_non_call_exceptions
)
3904 if (side_effects_p (op1
))
3905 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3909 if (trueop1
== CONST1_RTX (mode
))
3911 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3916 if (trueop1
== constm1_rtx
)
3918 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3920 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3926 /* 0%x is 0 (or x&0 if x has side-effects). */
3927 if (trueop0
== CONST0_RTX (mode
))
3929 if (side_effects_p (op1
))
3930 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3933 /* x%1 is 0 (of x&0 if x has side-effects). */
3934 if (trueop1
== CONST1_RTX (mode
))
3936 if (side_effects_p (op0
))
3937 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3938 return CONST0_RTX (mode
);
3940 /* Implement modulus by power of two as AND. */
3941 if (CONST_INT_P (trueop1
)
3942 && exact_log2 (UINTVAL (trueop1
)) > 0)
3943 return simplify_gen_binary (AND
, mode
, op0
,
3944 gen_int_mode (UINTVAL (trueop1
) - 1,
3949 /* 0%x is 0 (or x&0 if x has side-effects). */
3950 if (trueop0
== CONST0_RTX (mode
))
3952 if (side_effects_p (op1
))
3953 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3956 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3957 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3959 if (side_effects_p (op0
))
3960 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3961 return CONST0_RTX (mode
);
3967 if (trueop1
== CONST0_RTX (mode
))
3969 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3970 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3971 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3973 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3974 if (CONST_INT_P (trueop1
)
3975 && IN_RANGE (INTVAL (trueop1
),
3976 GET_MODE_UNIT_PRECISION (mode
) / 2 + (code
== ROTATE
),
3977 GET_MODE_UNIT_PRECISION (mode
) - 1))
3979 int new_amount
= GET_MODE_UNIT_PRECISION (mode
) - INTVAL (trueop1
);
3980 rtx new_amount_rtx
= gen_int_shift_amount (mode
, new_amount
);
3981 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3982 mode
, op0
, new_amount_rtx
);
3987 if (trueop1
== CONST0_RTX (mode
))
3989 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3991 /* Rotating ~0 always results in ~0. */
3992 if (CONST_INT_P (trueop0
)
3993 && HWI_COMPUTABLE_MODE_P (mode
)
3994 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3995 && ! side_effects_p (op1
))
4001 scalar constants c1, c2
4002 size (M2) > size (M1)
4003 c1 == size (M2) - size (M1)
4005 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
4009 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
4011 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
4012 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4014 && CONST_INT_P (op1
)
4015 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
4016 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
4018 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
4019 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
4020 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
4021 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
4022 && subreg_lowpart_p (op0
))
4024 rtx tmp
= gen_int_shift_amount
4025 (inner_mode
, INTVAL (XEXP (SUBREG_REG (op0
), 1)) + INTVAL (op1
));
4027 /* Combine would usually zero out the value when combining two
4028 local shifts and the range becomes larger or equal to the mode.
4029 However since we fold away one of the shifts here combine won't
4030 see it so we should immediately zero the result if it's out of
4032 if (code
== LSHIFTRT
4033 && INTVAL (tmp
) >= GET_MODE_BITSIZE (inner_mode
))
4036 tmp
= simplify_gen_binary (code
,
4038 XEXP (SUBREG_REG (op0
), 0),
4041 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
4044 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
4046 val
= INTVAL (op1
) & (GET_MODE_UNIT_PRECISION (mode
) - 1);
4047 if (val
!= INTVAL (op1
))
4048 return simplify_gen_binary (code
, mode
, op0
,
4049 gen_int_shift_amount (mode
, val
));
4056 if (trueop1
== CONST0_RTX (mode
))
4058 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
4062 && CONST_INT_P (trueop1
)
4063 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4064 && IN_RANGE (UINTVAL (trueop1
),
4065 1, GET_MODE_PRECISION (int_mode
) - 1))
4067 auto c
= (wi::one (GET_MODE_PRECISION (int_mode
))
4068 << UINTVAL (trueop1
));
4069 rtx new_op1
= immed_wide_int_const (c
, int_mode
);
4070 return simplify_gen_binary (MULT
, int_mode
, op0
, new_op1
);
4072 goto canonicalize_shift
;
4075 if (trueop1
== CONST0_RTX (mode
))
4077 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
4079 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
4080 if (GET_CODE (op0
) == CLZ
4081 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op0
, 0)), &inner_mode
)
4082 && CONST_INT_P (trueop1
)
4083 && STORE_FLAG_VALUE
== 1
4084 && INTVAL (trueop1
) < GET_MODE_UNIT_PRECISION (mode
))
4086 unsigned HOST_WIDE_INT zero_val
= 0;
4088 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode
, zero_val
)
4089 && zero_val
== GET_MODE_PRECISION (inner_mode
)
4090 && INTVAL (trueop1
) == exact_log2 (zero_val
))
4091 return simplify_gen_relational (EQ
, mode
, inner_mode
,
4092 XEXP (op0
, 0), const0_rtx
);
4094 goto canonicalize_shift
;
4097 if (HWI_COMPUTABLE_MODE_P (mode
)
4098 && mode_signbit_p (mode
, trueop1
)
4099 && ! side_effects_p (op0
))
4101 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4103 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4109 if (HWI_COMPUTABLE_MODE_P (mode
)
4110 && CONST_INT_P (trueop1
)
4111 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
4112 && ! side_effects_p (op0
))
4114 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4116 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4122 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
4124 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4126 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4132 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
4134 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4136 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4145 /* Simplify x +/- 0 to x, if possible. */
4146 if (trueop1
== CONST0_RTX (mode
))
4152 /* Simplify x * 0 to 0, if possible. */
4153 if (trueop1
== CONST0_RTX (mode
)
4154 && !side_effects_p (op0
))
4157 /* Simplify x * 1 to x, if possible. */
4158 if (trueop1
== CONST1_RTX (mode
))
4164 /* Simplify x * 0 to 0, if possible. */
4165 if (trueop1
== CONST0_RTX (mode
)
4166 && !side_effects_p (op0
))
4172 /* Simplify x / 1 to x, if possible. */
4173 if (trueop1
== CONST1_RTX (mode
))
4178 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
4179 return gen_vec_duplicate (mode
, op0
);
4180 if (valid_for_const_vector_p (mode
, op0
)
4181 && valid_for_const_vector_p (mode
, op1
))
4182 return gen_const_vec_series (mode
, op0
, op1
);
4186 if (!VECTOR_MODE_P (mode
))
4188 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
4189 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
4190 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
4191 gcc_assert (XVECLEN (trueop1
, 0) == 1);
4193 /* We can't reason about selections made at runtime. */
4194 if (!CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
4197 if (vec_duplicate_p (trueop0
, &elt0
))
4200 if (GET_CODE (trueop0
) == CONST_VECTOR
)
4201 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
4204 /* Extract a scalar element from a nested VEC_SELECT expression
4205 (with optional nested VEC_CONCAT expression). Some targets
4206 (i386) extract scalar element from a vector using chain of
4207 nested VEC_SELECT expressions. When input operand is a memory
4208 operand, this operation can be simplified to a simple scalar
4209 load from an offseted memory address. */
4211 if (GET_CODE (trueop0
) == VEC_SELECT
4212 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
4213 .is_constant (&n_elts
)))
4215 rtx op0
= XEXP (trueop0
, 0);
4216 rtx op1
= XEXP (trueop0
, 1);
4218 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
4224 gcc_assert (GET_CODE (op1
) == PARALLEL
);
4225 gcc_assert (i
< n_elts
);
4227 /* Select element, pointed by nested selector. */
4228 elem
= INTVAL (XVECEXP (op1
, 0, i
));
4230 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
4231 if (GET_CODE (op0
) == VEC_CONCAT
)
4233 rtx op00
= XEXP (op0
, 0);
4234 rtx op01
= XEXP (op0
, 1);
4236 machine_mode mode00
, mode01
;
4237 int n_elts00
, n_elts01
;
4239 mode00
= GET_MODE (op00
);
4240 mode01
= GET_MODE (op01
);
4242 /* Find out the number of elements of each operand.
4243 Since the concatenated result has a constant number
4244 of elements, the operands must too. */
4245 n_elts00
= GET_MODE_NUNITS (mode00
).to_constant ();
4246 n_elts01
= GET_MODE_NUNITS (mode01
).to_constant ();
4248 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
4250 /* Select correct operand of VEC_CONCAT
4251 and adjust selector. */
4252 if (elem
< n_elts01
)
4263 vec
= rtvec_alloc (1);
4264 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
4266 tmp
= gen_rtx_fmt_ee (code
, mode
,
4267 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
4273 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
4274 gcc_assert (GET_MODE_INNER (mode
)
4275 == GET_MODE_INNER (GET_MODE (trueop0
)));
4276 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
4278 if (vec_duplicate_p (trueop0
, &elt0
))
4279 /* It doesn't matter which elements are selected by trueop1,
4280 because they are all the same. */
4281 return gen_vec_duplicate (mode
, elt0
);
4283 if (GET_CODE (trueop0
) == CONST_VECTOR
)
4285 unsigned n_elts
= XVECLEN (trueop1
, 0);
4286 rtvec v
= rtvec_alloc (n_elts
);
4289 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
4290 for (i
= 0; i
< n_elts
; i
++)
4292 rtx x
= XVECEXP (trueop1
, 0, i
);
4294 if (!CONST_INT_P (x
))
4297 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
4301 return gen_rtx_CONST_VECTOR (mode
, v
);
4304 /* Recognize the identity. */
4305 if (GET_MODE (trueop0
) == mode
)
4307 bool maybe_ident
= true;
4308 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
4310 rtx j
= XVECEXP (trueop1
, 0, i
);
4311 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
4313 maybe_ident
= false;
4321 /* If we select a low-part subreg, return that. */
4322 if (vec_series_lowpart_p (mode
, GET_MODE (trueop0
), trueop1
))
4324 rtx new_rtx
= lowpart_subreg (mode
, trueop0
,
4325 GET_MODE (trueop0
));
4326 if (new_rtx
!= NULL_RTX
)
4330 /* If we build {a,b} then permute it, build the result directly. */
4331 if (XVECLEN (trueop1
, 0) == 2
4332 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
4333 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
4334 && GET_CODE (trueop0
) == VEC_CONCAT
4335 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
4336 && GET_MODE (XEXP (trueop0
, 0)) == mode
4337 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
4338 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
4340 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
4341 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
4344 gcc_assert (i0
< 4 && i1
< 4);
4345 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
4346 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
4348 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
4351 if (XVECLEN (trueop1
, 0) == 2
4352 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
4353 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
4354 && GET_CODE (trueop0
) == VEC_CONCAT
4355 && GET_MODE (trueop0
) == mode
)
4357 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
4358 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
4361 gcc_assert (i0
< 2 && i1
< 2);
4362 subop0
= XEXP (trueop0
, i0
);
4363 subop1
= XEXP (trueop0
, i1
);
4365 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
4368 /* If we select one half of a vec_concat, return that. */
4370 if (GET_CODE (trueop0
) == VEC_CONCAT
4371 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
4373 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 1)))
4375 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
4377 rtx subop0
= XEXP (trueop0
, 0);
4378 rtx subop1
= XEXP (trueop0
, 1);
4379 machine_mode mode0
= GET_MODE (subop0
);
4380 machine_mode mode1
= GET_MODE (subop1
);
4381 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
4382 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
4384 bool success
= true;
4385 for (int i
= 1; i
< l0
; ++i
)
4387 rtx j
= XVECEXP (trueop1
, 0, i
);
4388 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
4397 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
4399 bool success
= true;
4400 for (int i
= 1; i
< l1
; ++i
)
4402 rtx j
= XVECEXP (trueop1
, 0, i
);
4403 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
4414 /* Simplify vec_select of a subreg of X to just a vec_select of X
4415 when X has same component mode as vec_select. */
4416 unsigned HOST_WIDE_INT subreg_offset
= 0;
4417 if (GET_CODE (trueop0
) == SUBREG
4418 && GET_MODE_INNER (mode
)
4419 == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0
)))
4420 && GET_MODE_NUNITS (mode
).is_constant (&l1
)
4421 && constant_multiple_p (subreg_memory_offset (trueop0
),
4422 GET_MODE_UNIT_BITSIZE (mode
),
4426 = GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0
)));
4427 bool success
= true;
4428 for (int i
= 0; i
!= l1
; i
++)
4430 rtx idx
= XVECEXP (trueop1
, 0, i
);
4431 if (!CONST_INT_P (idx
)
4432 || maybe_ge (UINTVAL (idx
) + subreg_offset
, nunits
))
4444 rtvec vec
= rtvec_alloc (l1
);
4445 for (int i
= 0; i
< l1
; i
++)
4447 = GEN_INT (INTVAL (XVECEXP (trueop1
, 0, i
))
4449 par
= gen_rtx_PARALLEL (VOIDmode
, vec
);
4451 return gen_rtx_VEC_SELECT (mode
, SUBREG_REG (trueop0
), par
);
4456 if (XVECLEN (trueop1
, 0) == 1
4457 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
4458 && GET_CODE (trueop0
) == VEC_CONCAT
)
4461 offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
4463 /* Try to find the element in the VEC_CONCAT. */
4464 while (GET_MODE (vec
) != mode
4465 && GET_CODE (vec
) == VEC_CONCAT
)
4467 poly_int64 vec_size
;
4469 if (CONST_INT_P (XEXP (vec
, 0)))
4471 /* vec_concat of two const_ints doesn't make sense with
4472 respect to modes. */
4473 if (CONST_INT_P (XEXP (vec
, 1)))
4476 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
4477 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
4480 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
4482 if (known_lt (offset
, vec_size
))
4483 vec
= XEXP (vec
, 0);
4484 else if (known_ge (offset
, vec_size
))
4487 vec
= XEXP (vec
, 1);
4491 vec
= avoid_constant_pool_reference (vec
);
4494 if (GET_MODE (vec
) == mode
)
4498 /* If we select elements in a vec_merge that all come from the same
4499 operand, select from that operand directly. */
4500 if (GET_CODE (op0
) == VEC_MERGE
)
4502 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
4503 if (CONST_INT_P (trueop02
))
4505 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
4506 bool all_operand0
= true;
4507 bool all_operand1
= true;
4508 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
4510 rtx j
= XVECEXP (trueop1
, 0, i
);
4511 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
4512 all_operand1
= false;
4514 all_operand0
= false;
4516 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
4517 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
4518 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
4519 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
4523 /* If we have two nested selects that are inverses of each
4524 other, replace them with the source operand. */
4525 if (GET_CODE (trueop0
) == VEC_SELECT
4526 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
4528 rtx op0_subop1
= XEXP (trueop0
, 1);
4529 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
4530 gcc_assert (known_eq (XVECLEN (trueop1
, 0), GET_MODE_NUNITS (mode
)));
4532 /* Apply the outer ordering vector to the inner one. (The inner
4533 ordering vector is expressly permitted to be of a different
4534 length than the outer one.) If the result is { 0, 1, ..., n-1 }
4535 then the two VEC_SELECTs cancel. */
4536 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
4538 rtx x
= XVECEXP (trueop1
, 0, i
);
4539 if (!CONST_INT_P (x
))
4541 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
4542 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
4545 return XEXP (trueop0
, 0);
4551 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
4552 ? GET_MODE (trueop0
)
4553 : GET_MODE_INNER (mode
));
4554 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
4555 ? GET_MODE (trueop1
)
4556 : GET_MODE_INNER (mode
));
4558 gcc_assert (VECTOR_MODE_P (mode
));
4559 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode
)
4560 + GET_MODE_SIZE (op1_mode
),
4561 GET_MODE_SIZE (mode
)));
4563 if (VECTOR_MODE_P (op0_mode
))
4564 gcc_assert (GET_MODE_INNER (mode
)
4565 == GET_MODE_INNER (op0_mode
));
4567 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
4569 if (VECTOR_MODE_P (op1_mode
))
4570 gcc_assert (GET_MODE_INNER (mode
)
4571 == GET_MODE_INNER (op1_mode
));
4573 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
4575 unsigned int n_elts
, in_n_elts
;
4576 if ((GET_CODE (trueop0
) == CONST_VECTOR
4577 || CONST_SCALAR_INT_P (trueop0
)
4578 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
4579 && (GET_CODE (trueop1
) == CONST_VECTOR
4580 || CONST_SCALAR_INT_P (trueop1
)
4581 || CONST_DOUBLE_AS_FLOAT_P (trueop1
))
4582 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
)
4583 && GET_MODE_NUNITS (op0_mode
).is_constant (&in_n_elts
))
4585 rtvec v
= rtvec_alloc (n_elts
);
4587 for (i
= 0; i
< n_elts
; i
++)
4591 if (!VECTOR_MODE_P (op0_mode
))
4592 RTVEC_ELT (v
, i
) = trueop0
;
4594 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
4598 if (!VECTOR_MODE_P (op1_mode
))
4599 RTVEC_ELT (v
, i
) = trueop1
;
4601 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
4606 return gen_rtx_CONST_VECTOR (mode
, v
);
4609 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4610 Restrict the transformation to avoid generating a VEC_SELECT with a
4611 mode unrelated to its operand. */
4612 if (GET_CODE (trueop0
) == VEC_SELECT
4613 && GET_CODE (trueop1
) == VEC_SELECT
4614 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
4615 && GET_MODE_INNER (GET_MODE (XEXP (trueop0
, 0)))
4616 == GET_MODE_INNER(mode
))
4618 rtx par0
= XEXP (trueop0
, 1);
4619 rtx par1
= XEXP (trueop1
, 1);
4620 int len0
= XVECLEN (par0
, 0);
4621 int len1
= XVECLEN (par1
, 0);
4622 rtvec vec
= rtvec_alloc (len0
+ len1
);
4623 for (int i
= 0; i
< len0
; i
++)
4624 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
4625 for (int i
= 0; i
< len1
; i
++)
4626 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
4627 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
4628 gen_rtx_PARALLEL (VOIDmode
, vec
));
4637 if (mode
== GET_MODE (op0
)
4638 && mode
== GET_MODE (op1
)
4639 && vec_duplicate_p (op0
, &elt0
)
4640 && vec_duplicate_p (op1
, &elt1
))
4642 /* Try applying the operator to ELT and see if that simplifies.
4643 We can duplicate the result if so.
4645 The reason we don't use simplify_gen_binary is that it isn't
4646 necessarily a win to convert things like:
4648 (plus:V (vec_duplicate:V (reg:S R1))
4649 (vec_duplicate:V (reg:S R2)))
4653 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4655 The first might be done entirely in vector registers while the
4656 second might need a move between register files. */
4657 tem
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4660 return gen_vec_duplicate (mode
, tem
);
4666 /* Return true if binary operation OP distributes over addition in operand
4667 OPNO, with the other operand being held constant. OPNO counts from 1. */
4670 distributes_over_addition_p (rtx_code op
, int opno
)
4688 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
4691 if (VECTOR_MODE_P (mode
)
4692 && code
!= VEC_CONCAT
4693 && GET_CODE (op0
) == CONST_VECTOR
4694 && GET_CODE (op1
) == CONST_VECTOR
)
4697 if (CONST_VECTOR_STEPPED_P (op0
)
4698 && CONST_VECTOR_STEPPED_P (op1
))
4699 /* We can operate directly on the encoding if:
4701 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4703 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4705 Addition and subtraction are the supported operators
4706 for which this is true. */
4707 step_ok_p
= (code
== PLUS
|| code
== MINUS
);
4708 else if (CONST_VECTOR_STEPPED_P (op0
))
4709 /* We can operate directly on stepped encodings if:
4713 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4715 which is true if (x -> x op c) distributes over addition. */
4716 step_ok_p
= distributes_over_addition_p (code
, 1);
4718 /* Similarly in reverse. */
4719 step_ok_p
= distributes_over_addition_p (code
, 2);
4720 rtx_vector_builder builder
;
4721 if (!builder
.new_binary_operation (mode
, op0
, op1
, step_ok_p
))
4724 unsigned int count
= builder
.encoded_nelts ();
4725 for (unsigned int i
= 0; i
< count
; i
++)
4727 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4728 CONST_VECTOR_ELT (op0
, i
),
4729 CONST_VECTOR_ELT (op1
, i
));
4730 if (!x
|| !valid_for_const_vector_p (mode
, x
))
4732 builder
.quick_push (x
);
4734 return builder
.build ();
4737 if (VECTOR_MODE_P (mode
)
4738 && code
== VEC_CONCAT
4739 && (CONST_SCALAR_INT_P (op0
)
4740 || CONST_FIXED_P (op0
)
4741 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4742 && (CONST_SCALAR_INT_P (op1
)
4743 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4744 || CONST_FIXED_P (op1
)))
4746 /* Both inputs have a constant number of elements, so the result
4748 unsigned n_elts
= GET_MODE_NUNITS (mode
).to_constant ();
4749 rtvec v
= rtvec_alloc (n_elts
);
4751 gcc_assert (n_elts
>= 2);
4754 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4755 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4757 RTVEC_ELT (v
, 0) = op0
;
4758 RTVEC_ELT (v
, 1) = op1
;
4762 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
)).to_constant ();
4763 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
)).to_constant ();
4766 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4767 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4768 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4770 for (i
= 0; i
< op0_n_elts
; ++i
)
4771 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op0
, i
);
4772 for (i
= 0; i
< op1_n_elts
; ++i
)
4773 RTVEC_ELT (v
, op0_n_elts
+i
) = CONST_VECTOR_ELT (op1
, i
);
4776 return gen_rtx_CONST_VECTOR (mode
, v
);
4779 if (SCALAR_FLOAT_MODE_P (mode
)
4780 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4781 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4782 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4793 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4795 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4797 for (i
= 0; i
< 4; i
++)
4814 real_from_target (&r
, tmp0
, mode
);
4815 return const_double_from_real_value (r
, mode
);
4819 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4820 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4823 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4824 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4826 if (HONOR_SNANS (mode
)
4827 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4828 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4831 real_convert (&f0
, mode
, opr0
);
4832 real_convert (&f1
, mode
, opr1
);
4835 && real_equal (&f1
, &dconst0
)
4836 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4839 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4840 && flag_trapping_math
4841 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4843 int s0
= REAL_VALUE_NEGATIVE (f0
);
4844 int s1
= REAL_VALUE_NEGATIVE (f1
);
4849 /* Inf + -Inf = NaN plus exception. */
4854 /* Inf - Inf = NaN plus exception. */
4859 /* Inf / Inf = NaN plus exception. */
4866 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4867 && flag_trapping_math
4868 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4869 || (REAL_VALUE_ISINF (f1
)
4870 && real_equal (&f0
, &dconst0
))))
4871 /* Inf * 0 = NaN plus exception. */
4874 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4876 real_convert (&result
, mode
, &value
);
4878 /* Don't constant fold this floating point operation if
4879 the result has overflowed and flag_trapping_math. */
4881 if (flag_trapping_math
4882 && MODE_HAS_INFINITIES (mode
)
4883 && REAL_VALUE_ISINF (result
)
4884 && !REAL_VALUE_ISINF (f0
)
4885 && !REAL_VALUE_ISINF (f1
))
4886 /* Overflow plus exception. */
4889 /* Don't constant fold this floating point operation if the
4890 result may dependent upon the run-time rounding mode and
4891 flag_rounding_math is set, or if GCC's software emulation
4892 is unable to accurately represent the result. */
4894 if ((flag_rounding_math
4895 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4896 && (inexact
|| !real_identical (&result
, &value
)))
4899 return const_double_from_real_value (result
, mode
);
4903 /* We can fold some multi-word operations. */
4904 scalar_int_mode int_mode
;
4905 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
4906 && CONST_SCALAR_INT_P (op0
)
4907 && CONST_SCALAR_INT_P (op1
)
4908 && GET_MODE_PRECISION (int_mode
) <= MAX_BITSIZE_MODE_ANY_INT
)
4911 wi::overflow_type overflow
;
4912 rtx_mode_t pop0
= rtx_mode_t (op0
, int_mode
);
4913 rtx_mode_t pop1
= rtx_mode_t (op1
, int_mode
);
4915 #if TARGET_SUPPORTS_WIDE_INT == 0
4916 /* This assert keeps the simplification from producing a result
4917 that cannot be represented in a CONST_DOUBLE but a lot of
4918 upstream callers expect that this function never fails to
4919 simplify something and so you if you added this to the test
4920 above the code would die later anyway. If this assert
4921 happens, you just need to make the port support wide int. */
4922 gcc_assert (GET_MODE_PRECISION (int_mode
) <= HOST_BITS_PER_DOUBLE_INT
);
4927 result
= wi::sub (pop0
, pop1
);
4931 result
= wi::add (pop0
, pop1
);
4935 result
= wi::mul (pop0
, pop1
);
4939 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4945 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4951 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4957 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4963 result
= wi::bit_and (pop0
, pop1
);
4967 result
= wi::bit_or (pop0
, pop1
);
4971 result
= wi::bit_xor (pop0
, pop1
);
4975 result
= wi::smin (pop0
, pop1
);
4979 result
= wi::smax (pop0
, pop1
);
4983 result
= wi::umin (pop0
, pop1
);
4987 result
= wi::umax (pop0
, pop1
);
4994 wide_int wop1
= pop1
;
4995 if (SHIFT_COUNT_TRUNCATED
)
4996 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
4997 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
5003 result
= wi::lrshift (pop0
, wop1
);
5007 result
= wi::arshift (pop0
, wop1
);
5011 result
= wi::lshift (pop0
, wop1
);
5022 if (wi::neg_p (pop1
))
5028 result
= wi::lrotate (pop0
, pop1
);
5032 result
= wi::rrotate (pop0
, pop1
);
5042 result
= wi::add (pop0
, pop1
, SIGNED
, &overflow
);
5043 clamp_signed_saturation
:
5044 if (overflow
== wi::OVF_OVERFLOW
)
5045 result
= wi::max_value (GET_MODE_PRECISION (int_mode
), SIGNED
);
5046 else if (overflow
== wi::OVF_UNDERFLOW
)
5047 result
= wi::min_value (GET_MODE_PRECISION (int_mode
), SIGNED
);
5048 else if (overflow
!= wi::OVF_NONE
)
5053 result
= wi::add (pop0
, pop1
, UNSIGNED
, &overflow
);
5054 clamp_unsigned_saturation
:
5055 if (overflow
!= wi::OVF_NONE
)
5056 result
= wi::max_value (GET_MODE_PRECISION (int_mode
), UNSIGNED
);
5060 result
= wi::sub (pop0
, pop1
, SIGNED
, &overflow
);
5061 goto clamp_signed_saturation
;
5064 result
= wi::sub (pop0
, pop1
, UNSIGNED
, &overflow
);
5065 if (overflow
!= wi::OVF_NONE
)
5066 result
= wi::min_value (GET_MODE_PRECISION (int_mode
), UNSIGNED
);
5070 result
= wi::mul (pop0
, pop1
, SIGNED
, &overflow
);
5071 goto clamp_signed_saturation
;
5074 result
= wi::mul (pop0
, pop1
, UNSIGNED
, &overflow
);
5075 goto clamp_unsigned_saturation
;
5078 result
= wi::mul_high (pop0
, pop1
, SIGNED
);
5082 result
= wi::mul_high (pop0
, pop1
, UNSIGNED
);
5088 return immed_wide_int_const (result
, int_mode
);
5091 /* Handle polynomial integers. */
5092 if (NUM_POLY_INT_COEFFS
> 1
5093 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5094 && poly_int_rtx_p (op0
)
5095 && poly_int_rtx_p (op1
))
5097 poly_wide_int result
;
5101 result
= wi::to_poly_wide (op0
, mode
) + wi::to_poly_wide (op1
, mode
);
5105 result
= wi::to_poly_wide (op0
, mode
) - wi::to_poly_wide (op1
, mode
);
5109 if (CONST_SCALAR_INT_P (op1
))
5110 result
= wi::to_poly_wide (op0
, mode
) * rtx_mode_t (op1
, mode
);
5116 if (CONST_SCALAR_INT_P (op1
))
5118 wide_int shift
= rtx_mode_t (op1
, mode
);
5119 if (SHIFT_COUNT_TRUNCATED
)
5120 shift
= wi::umod_trunc (shift
, GET_MODE_PRECISION (int_mode
));
5121 else if (wi::geu_p (shift
, GET_MODE_PRECISION (int_mode
)))
5123 result
= wi::to_poly_wide (op0
, mode
) << shift
;
5130 if (!CONST_SCALAR_INT_P (op1
)
5131 || !can_ior_p (wi::to_poly_wide (op0
, mode
),
5132 rtx_mode_t (op1
, mode
), &result
))
5139 return immed_wide_int_const (result
, int_mode
);
5147 /* Return a positive integer if X should sort after Y. The value
5148 returned is 1 if and only if X and Y are both regs. */
5151 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
5155 result
= (commutative_operand_precedence (y
)
5156 - commutative_operand_precedence (x
));
5158 return result
+ result
;
5160 /* Group together equal REGs to do more simplification. */
5161 if (REG_P (x
) && REG_P (y
))
5162 return REGNO (x
) > REGNO (y
);
5167 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
5168 operands may be another PLUS or MINUS.
5170 Rather than test for specific case, we do this by a brute-force method
5171 and do all possible simplifications until no more changes occur. Then
5172 we rebuild the operation.
5174 May return NULL_RTX when no changes were made. */
5177 simplify_context::simplify_plus_minus (rtx_code code
, machine_mode mode
,
5180 struct simplify_plus_minus_op_data
5187 int changed
, n_constants
, canonicalized
= 0;
5190 memset (ops
, 0, sizeof ops
);
5192 /* Set up the two operands and then expand them until nothing has been
5193 changed. If we run out of room in our array, give up; this should
5194 almost never happen. */
5199 ops
[1].neg
= (code
== MINUS
);
5206 for (i
= 0; i
< n_ops
; i
++)
5208 rtx this_op
= ops
[i
].op
;
5209 int this_neg
= ops
[i
].neg
;
5210 enum rtx_code this_code
= GET_CODE (this_op
);
5216 if (n_ops
== ARRAY_SIZE (ops
))
5219 ops
[n_ops
].op
= XEXP (this_op
, 1);
5220 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
5223 ops
[i
].op
= XEXP (this_op
, 0);
5225 /* If this operand was negated then we will potentially
5226 canonicalize the expression. Similarly if we don't
5227 place the operands adjacent we're re-ordering the
5228 expression and thus might be performing a
5229 canonicalization. Ignore register re-ordering.
5230 ??? It might be better to shuffle the ops array here,
5231 but then (plus (plus (A, B), plus (C, D))) wouldn't
5232 be seen as non-canonical. */
5235 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
5240 ops
[i
].op
= XEXP (this_op
, 0);
5241 ops
[i
].neg
= ! this_neg
;
5247 if (n_ops
!= ARRAY_SIZE (ops
)
5248 && GET_CODE (XEXP (this_op
, 0)) == PLUS
5249 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
5250 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
5252 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
5253 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
5254 ops
[n_ops
].neg
= this_neg
;
5262 /* ~a -> (-a - 1) */
5263 if (n_ops
!= ARRAY_SIZE (ops
))
5265 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
5266 ops
[n_ops
++].neg
= this_neg
;
5267 ops
[i
].op
= XEXP (this_op
, 0);
5268 ops
[i
].neg
= !this_neg
;
5274 CASE_CONST_SCALAR_INT
:
5275 case CONST_POLY_INT
:
5279 ops
[i
].op
= neg_poly_int_rtx (mode
, this_op
);
5293 if (n_constants
> 1)
5296 gcc_assert (n_ops
>= 2);
5298 /* If we only have two operands, we can avoid the loops. */
5301 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
5304 /* Get the two operands. Be careful with the order, especially for
5305 the cases where code == MINUS. */
5306 if (ops
[0].neg
&& ops
[1].neg
)
5308 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
5311 else if (ops
[0].neg
)
5322 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
5325 /* Now simplify each pair of operands until nothing changes. */
5328 /* Insertion sort is good enough for a small array. */
5329 for (i
= 1; i
< n_ops
; i
++)
5331 struct simplify_plus_minus_op_data save
;
5335 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
5338 /* Just swapping registers doesn't count as canonicalization. */
5344 ops
[j
+ 1] = ops
[j
];
5346 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
5351 for (i
= n_ops
- 1; i
> 0; i
--)
5352 for (j
= i
- 1; j
>= 0; j
--)
5354 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
5355 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
5357 if (lhs
!= 0 && rhs
!= 0)
5359 enum rtx_code ncode
= PLUS
;
5365 std::swap (lhs
, rhs
);
5367 else if (swap_commutative_operands_p (lhs
, rhs
))
5368 std::swap (lhs
, rhs
);
5370 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
5371 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
5373 rtx tem_lhs
, tem_rhs
;
5375 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
5376 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
5377 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
5380 if (tem
&& !CONSTANT_P (tem
))
5381 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
5384 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
5388 /* Reject "simplifications" that just wrap the two
5389 arguments in a CONST. Failure to do so can result
5390 in infinite recursion with simplify_binary_operation
5391 when it calls us to simplify CONST operations.
5392 Also, if we find such a simplification, don't try
5393 any more combinations with this rhs: We must have
5394 something like symbol+offset, ie. one of the
5395 trivial CONST expressions we handle later. */
5396 if (GET_CODE (tem
) == CONST
5397 && GET_CODE (XEXP (tem
, 0)) == ncode
5398 && XEXP (XEXP (tem
, 0), 0) == lhs
5399 && XEXP (XEXP (tem
, 0), 1) == rhs
)
5402 if (GET_CODE (tem
) == NEG
)
5403 tem
= XEXP (tem
, 0), lneg
= !lneg
;
5404 if (poly_int_rtx_p (tem
) && lneg
)
5405 tem
= neg_poly_int_rtx (mode
, tem
), lneg
= 0;
5409 ops
[j
].op
= NULL_RTX
;
5419 /* Pack all the operands to the lower-numbered entries. */
5420 for (i
= 0, j
= 0; j
< n_ops
; j
++)
5429 /* If nothing changed, check that rematerialization of rtl instructions
5430 is still required. */
5433 /* Perform rematerialization if only all operands are registers and
5434 all operations are PLUS. */
5435 /* ??? Also disallow (non-global, non-frame) fixed registers to work
5436 around rs6000 and how it uses the CA register. See PR67145. */
5437 for (i
= 0; i
< n_ops
; i
++)
5439 || !REG_P (ops
[i
].op
)
5440 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
5441 && fixed_regs
[REGNO (ops
[i
].op
)]
5442 && !global_regs
[REGNO (ops
[i
].op
)]
5443 && ops
[i
].op
!= frame_pointer_rtx
5444 && ops
[i
].op
!= arg_pointer_rtx
5445 && ops
[i
].op
!= stack_pointer_rtx
))
5450 /* Create (minus -C X) instead of (neg (const (plus X C))). */
5452 && CONST_INT_P (ops
[1].op
)
5453 && CONSTANT_P (ops
[0].op
)
5455 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
5457 /* We suppressed creation of trivial CONST expressions in the
5458 combination loop to avoid recursion. Create one manually now.
5459 The combination loop should have ensured that there is exactly
5460 one CONST_INT, and the sort will have ensured that it is last
5461 in the array and that any other constant will be next-to-last. */
5464 && poly_int_rtx_p (ops
[n_ops
- 1].op
)
5465 && CONSTANT_P (ops
[n_ops
- 2].op
))
5467 rtx value
= ops
[n_ops
- 1].op
;
5468 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
5469 value
= neg_poly_int_rtx (mode
, value
);
5470 if (CONST_INT_P (value
))
5472 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
5478 /* Put a non-negated operand first, if possible. */
5480 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
5483 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
5492 /* Now make the result by performing the requested operations. */
5495 for (i
= 1; i
< n_ops
; i
++)
5496 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
5497 mode
, result
, ops
[i
].op
);
5502 /* Check whether an operand is suitable for calling simplify_plus_minus. */
5504 plus_minus_operand_p (const_rtx x
)
5506 return GET_CODE (x
) == PLUS
5507 || GET_CODE (x
) == MINUS
5508 || (GET_CODE (x
) == CONST
5509 && GET_CODE (XEXP (x
, 0)) == PLUS
5510 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
5511 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
5514 /* Like simplify_binary_operation except used for relational operators.
5515 MODE is the mode of the result. If MODE is VOIDmode, both operands must
5516 not also be VOIDmode.
5518 CMP_MODE specifies in which mode the comparison is done in, so it is
5519 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
5520 the operands or, if both are VOIDmode, the operands are compared in
5521 "infinite precision". */
5523 simplify_context::simplify_relational_operation (rtx_code code
,
5525 machine_mode cmp_mode
,
5528 rtx tem
, trueop0
, trueop1
;
5530 if (cmp_mode
== VOIDmode
)
5531 cmp_mode
= GET_MODE (op0
);
5532 if (cmp_mode
== VOIDmode
)
5533 cmp_mode
= GET_MODE (op1
);
5535 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
5537 return relational_result (mode
, cmp_mode
, tem
);
5539 /* For the following tests, ensure const0_rtx is op1. */
5540 if (swap_commutative_operands_p (op0
, op1
)
5541 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
5542 std::swap (op0
, op1
), code
= swap_condition (code
);
5544 /* If op0 is a compare, extract the comparison arguments from it. */
5545 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5546 return simplify_gen_relational (code
, mode
, VOIDmode
,
5547 XEXP (op0
, 0), XEXP (op0
, 1));
5549 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
)
5552 trueop0
= avoid_constant_pool_reference (op0
);
5553 trueop1
= avoid_constant_pool_reference (op1
);
5554 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
5558 /* This part of simplify_relational_operation is only used when CMP_MODE
5559 is not in class MODE_CC (i.e. it is a real comparison).
5561 MODE is the mode of the result, while CMP_MODE specifies in which
5562 mode the comparison is done in, so it is the mode of the operands. */
5565 simplify_context::simplify_relational_operation_1 (rtx_code code
,
5567 machine_mode cmp_mode
,
5570 enum rtx_code op0code
= GET_CODE (op0
);
5572 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
5574 /* If op0 is a comparison, extract the comparison arguments
5578 if (GET_MODE (op0
) == mode
)
5579 return simplify_rtx (op0
);
5581 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
5582 XEXP (op0
, 0), XEXP (op0
, 1));
5584 else if (code
== EQ
)
5586 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
5587 if (new_code
!= UNKNOWN
)
5588 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
5589 XEXP (op0
, 0), XEXP (op0
, 1));
5593 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5594 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
5595 if ((code
== LTU
|| code
== GEU
)
5596 && GET_CODE (op0
) == PLUS
5597 && CONST_INT_P (XEXP (op0
, 1))
5598 && (rtx_equal_p (op1
, XEXP (op0
, 0))
5599 || rtx_equal_p (op1
, XEXP (op0
, 1)))
5600 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5601 && XEXP (op0
, 1) != const0_rtx
)
5604 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
5605 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
5606 cmp_mode
, XEXP (op0
, 0), new_cmp
);
5609 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5610 transformed into (LTU a -C). */
5611 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
5612 && CONST_INT_P (XEXP (op0
, 1))
5613 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
5614 && XEXP (op0
, 1) != const0_rtx
)
5617 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
5618 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
5619 XEXP (op0
, 0), new_cmp
);
5622 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5623 if ((code
== LTU
|| code
== GEU
)
5624 && GET_CODE (op0
) == PLUS
5625 && rtx_equal_p (op1
, XEXP (op0
, 1))
5626 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5627 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
5628 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
5629 copy_rtx (XEXP (op0
, 0)));
5631 if (op1
== const0_rtx
)
5633 /* Canonicalize (GTU x 0) as (NE x 0). */
5635 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
5636 /* Canonicalize (LEU x 0) as (EQ x 0). */
5638 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
5640 else if (op1
== const1_rtx
)
5645 /* Canonicalize (GE x 1) as (GT x 0). */
5646 return simplify_gen_relational (GT
, mode
, cmp_mode
,
5649 /* Canonicalize (GEU x 1) as (NE x 0). */
5650 return simplify_gen_relational (NE
, mode
, cmp_mode
,
5653 /* Canonicalize (LT x 1) as (LE x 0). */
5654 return simplify_gen_relational (LE
, mode
, cmp_mode
,
5657 /* Canonicalize (LTU x 1) as (EQ x 0). */
5658 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
5664 else if (op1
== constm1_rtx
)
5666 /* Canonicalize (LE x -1) as (LT x 0). */
5668 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
5669 /* Canonicalize (GT x -1) as (GE x 0). */
5671 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
5674 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5675 if ((code
== EQ
|| code
== NE
)
5676 && (op0code
== PLUS
|| op0code
== MINUS
)
5678 && CONSTANT_P (XEXP (op0
, 1))
5679 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
5681 rtx x
= XEXP (op0
, 0);
5682 rtx c
= XEXP (op0
, 1);
5683 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
5684 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
5686 /* Detect an infinite recursive condition, where we oscillate at this
5687 simplification case between:
5688 A + B == C <---> C - B == A,
5689 where A, B, and C are all constants with non-simplifiable expressions,
5690 usually SYMBOL_REFs. */
5691 if (GET_CODE (tem
) == invcode
5693 && rtx_equal_p (c
, XEXP (tem
, 1)))
5696 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
5699 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
5700 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5701 scalar_int_mode int_mode
, int_cmp_mode
;
5703 && op1
== const0_rtx
5704 && is_int_mode (mode
, &int_mode
)
5705 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
5706 /* ??? Work-around BImode bugs in the ia64 backend. */
5707 && int_mode
!= BImode
5708 && int_cmp_mode
!= BImode
5709 && nonzero_bits (op0
, int_cmp_mode
) == 1
5710 && STORE_FLAG_VALUE
== 1)
5711 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
5712 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
5713 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
5715 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5716 if ((code
== EQ
|| code
== NE
)
5717 && op1
== const0_rtx
5719 return simplify_gen_relational (code
, mode
, cmp_mode
,
5720 XEXP (op0
, 0), XEXP (op0
, 1));
5722 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5723 if ((code
== EQ
|| code
== NE
)
5725 && rtx_equal_p (XEXP (op0
, 0), op1
)
5726 && !side_effects_p (XEXP (op0
, 0)))
5727 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
5730 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5731 if ((code
== EQ
|| code
== NE
)
5733 && rtx_equal_p (XEXP (op0
, 1), op1
)
5734 && !side_effects_p (XEXP (op0
, 1)))
5735 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5738 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5739 if ((code
== EQ
|| code
== NE
)
5741 && CONST_SCALAR_INT_P (op1
)
5742 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
5743 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5744 simplify_gen_binary (XOR
, cmp_mode
,
5745 XEXP (op0
, 1), op1
));
5747 /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5748 constant folding if x/y is a constant. */
5749 if ((code
== EQ
|| code
== NE
)
5750 && (op0code
== AND
|| op0code
== IOR
)
5751 && !side_effects_p (op1
)
5752 && op1
!= CONST0_RTX (cmp_mode
))
5754 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5755 (eq/ne (and (not y) x) 0). */
5756 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 0), op1
))
5757 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 1), op1
)))
5759 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1),
5761 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
5763 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5764 CONST0_RTX (cmp_mode
));
5767 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5768 (eq/ne (and (not x) y) 0). */
5769 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 1), op1
))
5770 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 0), op1
)))
5772 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0),
5774 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
5776 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5777 CONST0_RTX (cmp_mode
));
5781 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5782 if ((code
== EQ
|| code
== NE
)
5783 && GET_CODE (op0
) == BSWAP
5784 && CONST_SCALAR_INT_P (op1
))
5785 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5786 simplify_gen_unary (BSWAP
, cmp_mode
,
5789 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5790 if ((code
== EQ
|| code
== NE
)
5791 && GET_CODE (op0
) == BSWAP
5792 && GET_CODE (op1
) == BSWAP
)
5793 return simplify_gen_relational (code
, mode
, cmp_mode
,
5794 XEXP (op0
, 0), XEXP (op1
, 0));
5796 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
5802 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5803 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
5804 XEXP (op0
, 0), const0_rtx
);
5809 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5810 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
5811 XEXP (op0
, 0), const0_rtx
);
5830 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5831 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
5832 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5833 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5834 For floating-point comparisons, assume that the operands were ordered. */
/* Map the comparison facts recorded in KNOWN_RESULTS (a mask of the
   CMP_EQ / CMP_LT / CMP_GT / CMP_LTU / CMP_GTU bits) to the constant
   rtx result (const_true_rtx or const0_rtx) of comparison CODE, as
   described in the block comment preceding this function.
   NOTE(review): the switch statement and its case labels were dropped
   by the extraction that produced this chunk; the per-arm notes below
   pair each return with a comparison family only by the CMP_* bit it
   tests and by ordering — confirm against the original source.  */
5837 comparison_result (enum rtx_code code
, int known_results
)
/* Presumably the equality arm: true exactly when equality is known.  */
5843 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
/* Presumably the inequality arm: logical negation of the arm above.  */
5846 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
/* Signed less-than family: true when a signed "<" is known.  */
5850 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
/* Negated signed less-than family (i.e. a signed ">=" style arm).  */
5853 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
/* Signed greater-than family.  */
5857 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
/* Negated signed greater-than family (signed "<=" style arm).  */
5860 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
/* Unsigned less-than family.  */
5863 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
/* Negated unsigned less-than family (unsigned ">=" style arm).  */
5865 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
/* Unsigned greater-than family.  */
5868 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
/* Negated unsigned greater-than family (unsigned "<=" style arm).  */
5870 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
/* Final arm: unconditionally true.  Per the function comment, operands
   are assumed ordered for FP comparisons, so this is presumably the
   ORDERED case — TODO confirm (its case label was lost).  */
5873 return const_true_rtx
;
5881 /* Check if the given comparison (done in the given MODE) is actually
5882 a tautology or a contradiction. If the mode is VOIDmode, the
5883 comparison is done in "infinite precision". If no simplification
5884 is possible, this function returns zero. Otherwise, it returns
5885 either const_true_rtx or const0_rtx. */
5888 simplify_const_relational_operation (enum rtx_code code
,
5896 gcc_assert (mode
!= VOIDmode
5897 || (GET_MODE (op0
) == VOIDmode
5898 && GET_MODE (op1
) == VOIDmode
));
5900 /* If op0 is a compare, extract the comparison arguments from it. */
5901 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5903 op1
= XEXP (op0
, 1);
5904 op0
= XEXP (op0
, 0);
5906 if (GET_MODE (op0
) != VOIDmode
)
5907 mode
= GET_MODE (op0
);
5908 else if (GET_MODE (op1
) != VOIDmode
)
5909 mode
= GET_MODE (op1
);
5914 /* We can't simplify MODE_CC values since we don't know what the
5915 actual comparison is. */
5916 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
)
5919 /* Make sure the constant is second. */
5920 if (swap_commutative_operands_p (op0
, op1
))
5922 std::swap (op0
, op1
);
5923 code
= swap_condition (code
);
5926 trueop0
= avoid_constant_pool_reference (op0
);
5927 trueop1
= avoid_constant_pool_reference (op1
);
5929 /* For integer comparisons of A and B maybe we can simplify A - B and can
5930 then simplify a comparison of that with zero. If A and B are both either
5931 a register or a CONST_INT, this can't help; testing for these cases will
5932 prevent infinite recursion here and speed things up.
5934 We can only do this for EQ and NE comparisons as otherwise we may
5935 lose or introduce overflow which we cannot disregard as undefined as
5936 we do not know the signedness of the operation on either the left or
5937 the right hand side of the comparison. */
5939 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5940 && (code
== EQ
|| code
== NE
)
5941 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5942 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5943 && (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
)) != 0
5944 /* We cannot do this if tem is a nonzero address. */
5945 && ! nonzero_address_p (tem
))
5946 return simplify_const_relational_operation (signed_condition (code
),
5947 mode
, tem
, const0_rtx
);
5949 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5950 return const_true_rtx
;
5952 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
5955 /* For modes without NaNs, if the two operands are equal, we know the
5956 result except if they have side-effects. Even with NaNs we know
5957 the result of unordered comparisons and, if signaling NaNs are
5958 irrelevant, also the result of LT/GT/LTGT. */
5959 if ((! HONOR_NANS (trueop0
)
5960 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5961 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5962 && ! HONOR_SNANS (trueop0
)))
5963 && rtx_equal_p (trueop0
, trueop1
)
5964 && ! side_effects_p (trueop0
))
5965 return comparison_result (code
, CMP_EQ
);
5967 /* If the operands are floating-point constants, see if we can fold
5969 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5970 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5971 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5973 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5974 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5976 /* Comparisons are unordered iff at least one of the values is NaN. */
5977 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
5987 return const_true_rtx
;
6000 return comparison_result (code
,
6001 (real_equal (d0
, d1
) ? CMP_EQ
:
6002 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
6005 /* Otherwise, see if the operands are both integers. */
6006 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
6007 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
6009 /* It would be nice if we really had a mode here. However, the
6010 largest int representable on the target is as good as
6012 machine_mode cmode
= (mode
== VOIDmode
) ? MAX_MODE_INT
: mode
;
6013 rtx_mode_t ptrueop0
= rtx_mode_t (trueop0
, cmode
);
6014 rtx_mode_t ptrueop1
= rtx_mode_t (trueop1
, cmode
);
6016 if (wi::eq_p (ptrueop0
, ptrueop1
))
6017 return comparison_result (code
, CMP_EQ
);
6020 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
6021 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
6022 return comparison_result (code
, cr
);
6026 /* Optimize comparisons with upper and lower bounds. */
6027 scalar_int_mode int_mode
;
6028 if (CONST_INT_P (trueop1
)
6029 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
6030 && HWI_COMPUTABLE_MODE_P (int_mode
)
6031 && !side_effects_p (trueop0
))
6034 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
6035 HOST_WIDE_INT val
= INTVAL (trueop1
);
6036 HOST_WIDE_INT mmin
, mmax
;
6046 /* Get a reduced range if the sign bit is zero. */
6047 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
6054 rtx mmin_rtx
, mmax_rtx
;
6055 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
6057 mmin
= INTVAL (mmin_rtx
);
6058 mmax
= INTVAL (mmax_rtx
);
6061 unsigned int sign_copies
6062 = num_sign_bit_copies (trueop0
, int_mode
);
6064 mmin
>>= (sign_copies
- 1);
6065 mmax
>>= (sign_copies
- 1);
6071 /* x >= y is always true for y <= mmin, always false for y > mmax. */
6073 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
6074 return const_true_rtx
;
6075 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
6080 return const_true_rtx
;
6085 /* x <= y is always true for y >= mmax, always false for y < mmin. */
6087 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
6088 return const_true_rtx
;
6089 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
6094 return const_true_rtx
;
6100 /* x == y is always false for y out of range. */
6101 if (val
< mmin
|| val
> mmax
)
6105 /* x > y is always false for y >= mmax, always true for y < mmin. */
6107 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
6109 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
6110 return const_true_rtx
;
6116 return const_true_rtx
;
6119 /* x < y is always false for y <= mmin, always true for y > mmax. */
6121 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
6123 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
6124 return const_true_rtx
;
6130 return const_true_rtx
;
6134 /* x != y is always true for y out of range. */
6135 if (val
< mmin
|| val
> mmax
)
6136 return const_true_rtx
;
6144 /* Optimize integer comparisons with zero. */
6145 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
6146 && trueop1
== const0_rtx
6147 && !side_effects_p (trueop0
))
6149 /* Some addresses are known to be nonzero. We don't know
6150 their sign, but equality comparisons are known. */
6151 if (nonzero_address_p (trueop0
))
6153 if (code
== EQ
|| code
== LEU
)
6155 if (code
== NE
|| code
== GTU
)
6156 return const_true_rtx
;
6159 /* See if the first operand is an IOR with a constant. If so, we
6160 may be able to determine the result of this comparison. */
6161 if (GET_CODE (op0
) == IOR
)
6163 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
6164 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
6166 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
6167 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
6168 && (UINTVAL (inner_const
)
6179 return const_true_rtx
;
6183 return const_true_rtx
;
6197 /* Optimize comparison of ABS with zero. */
6198 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
6199 && (GET_CODE (trueop0
) == ABS
6200 || (GET_CODE (trueop0
) == FLOAT_EXTEND
6201 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
6206 /* Optimize abs(x) < 0.0. */
6207 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
6212 /* Optimize abs(x) >= 0.0. */
6213 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
6214 return const_true_rtx
;
6218 /* Optimize ! (abs(x) < 0.0). */
6219 return const_true_rtx
;
6229 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
6230 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
6231 or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
6232 can be simplified to that or NULL_RTX if not.
6233 Assume X is compared against zero with CMP_CODE and the true
6234 arm is TRUE_VAL and the false arm is FALSE_VAL. */
/* See the block comment above: recognize (X CMP 0) ? VAL : OP (X) with
   OP being CLZ or CTZ and VAL the target's defined value at zero.
   NOTE(review): several interior lines (branch bodies, the final
   returns around original lines 6268-6270) were dropped by the
   extraction; comments below describe only what is visible.  */
6237 simplify_context::simplify_cond_clz_ctz (rtx x
, rtx_code cmp_code
,
6238 rtx true_val
, rtx false_val
)
/* Only EQ/NE comparisons against zero are handled.  */
6240 if (cmp_code
!= EQ
&& cmp_code
!= NE
)
6243 /* Result on X == 0 and X !=0 respectively. */
6244 rtx on_zero
, on_nonzero
;
/* One assignment arm (presumably cmp_code == EQ): true arm is the
   zero result, false arm the nonzero result — the other half of this
   assignment was lost in extraction.  */
6248 on_nonzero
= false_val
;
/* The opposite arm (presumably cmp_code == NE): roles swapped.  */
6252 on_zero
= false_val
;
6253 on_nonzero
= true_val
;
/* The nonzero arm must be CLZ (x) or CTZ (x) of exactly X, and the
   zero arm must be a CONST_INT, otherwise no simplification.  */
6256 rtx_code op_code
= GET_CODE (on_nonzero
);
6257 if ((op_code
!= CLZ
&& op_code
!= CTZ
)
6258 || !rtx_equal_p (XEXP (on_nonzero
, 0), x
)
6259 || !CONST_INT_P (on_zero
))
/* Query the target: is CLZ/CTZ defined at zero for X's mode, and does
   that defined value (written into op_val) match the constant arm?  */
6262 HOST_WIDE_INT op_val
;
6263 scalar_int_mode mode ATTRIBUTE_UNUSED
6264 = as_a
<scalar_int_mode
> (GET_MODE (XEXP (on_nonzero
, 0)));
6265 if (((op_code
== CLZ
&& CLZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
))
6266 || (op_code
== CTZ
&& CTZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
)))
6267 && op_val
== INTVAL (on_zero
))
6273 /* Try to simplify X given that it appears within operand OP of a
6274 VEC_MERGE operation whose mask is MASK. X need not use the same
6275 vector mode as the VEC_MERGE, but it must have the same number of
6278 Return the simplified X on success, otherwise return NULL_RTX. */
/* See the block comment above: try to simplify X given that it appears
   within operand OP (0 or 1) of a VEC_MERGE whose mask is MASK.
   Recurses through unary, binary and ternary operations whose operands
   are vectors with the same element count.
   NOTE(review): several interior lines (some if-conditions and braces,
   e.g. the heads of the unary/binary cases around original lines
   6292/6297/6301/6309, and the trailing return NULL_RTX) were dropped
   by the extraction; comments below describe only what is visible.  */
6281 simplify_context::simplify_merge_mask (rtx x
, rtx mask
, int op
)
6283 gcc_assert (VECTOR_MODE_P (GET_MODE (x
)));
6284 poly_uint64 nunits
= GET_MODE_NUNITS (GET_MODE (x
));
/* Base case: X itself is a VEC_MERGE with the same mask — it selects
   the same lanes, so operand OP can be used directly (as long as the
   discarded operand has no side effects).  */
6285 if (GET_CODE (x
) == VEC_MERGE
&& rtx_equal_p (XEXP (x
, 2), mask
))
6287 if (side_effects_p (XEXP (x
, 1 - op
)))
6290 return XEXP (x
, op
);
/* Unary case (head of the condition lost in extraction): recurse into
   the single vector operand, rebuilding the unary op if it changed.  */
6293 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
6294 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
))
6296 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
6298 return simplify_gen_unary (GET_CODE (x
), GET_MODE (x
), top0
,
6299 GET_MODE (XEXP (x
, 0)));
/* Binary case (head of the condition lost in extraction): both operands
   must be vectors with NUNITS elements; recurse into each.  */
6302 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
6303 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
6304 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
6305 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
))
6307 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
6308 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
/* Comparisons need their operand mode; take it from whichever operand
   has a non-VOID mode.  */
6311 if (COMPARISON_P (x
))
6312 return simplify_gen_relational (GET_CODE (x
), GET_MODE (x
),
6313 GET_MODE (XEXP (x
, 0)) != VOIDmode
6314 ? GET_MODE (XEXP (x
, 0))
6315 : GET_MODE (XEXP (x
, 1)),
6316 top0
? top0
: XEXP (x
, 0),
6317 top1
? top1
: XEXP (x
, 1));
/* Non-comparison binary op: keep each original operand where the
   recursion found nothing.  */
6319 return simplify_gen_binary (GET_CODE (x
), GET_MODE (x
),
6320 top0
? top0
: XEXP (x
, 0),
6321 top1
? top1
: XEXP (x
, 1));
/* Ternary case: all three operands must be NUNITS-element vectors.  */
6324 if (GET_RTX_CLASS (GET_CODE (x
)) == RTX_TERNARY
6325 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
6326 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
6327 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
6328 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
)
6329 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 2)))
6330 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 2))), nunits
))
6332 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
6333 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
6334 rtx top2
= simplify_merge_mask (XEXP (x
, 2), mask
, op
);
/* Rebuild only if at least one operand actually simplified.  */
6335 if (top0
|| top1
|| top2
)
6336 return simplify_gen_ternary (GET_CODE (x
), GET_MODE (x
),
6337 GET_MODE (XEXP (x
, 0)),
6338 top0
? top0
: XEXP (x
, 0),
6339 top1
? top1
: XEXP (x
, 1),
6340 top2
? top2
: XEXP (x
, 2));
6346 /* Simplify CODE, an operation with result mode MODE and three operands,
6347 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
6348 a constant. Return 0 if no simplifications is possible. */
6351 simplify_context::simplify_ternary_operation (rtx_code code
, machine_mode mode
,
6352 machine_mode op0_mode
,
6353 rtx op0
, rtx op1
, rtx op2
)
6355 bool any_change
= false;
6357 scalar_int_mode int_mode
, int_op0_mode
;
6358 unsigned int n_elts
;
6363 /* Simplify negations around the multiplication. */
6364 /* -a * -b + c => a * b + c. */
6365 if (GET_CODE (op0
) == NEG
)
6367 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
6369 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
6371 else if (GET_CODE (op1
) == NEG
)
6373 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
6375 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
6378 /* Canonicalize the two multiplication operands. */
6379 /* a * -b + c => -b * a + c. */
6380 if (swap_commutative_operands_p (op0
, op1
))
6381 std::swap (op0
, op1
), any_change
= true;
6384 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
6389 if (CONST_INT_P (op0
)
6390 && CONST_INT_P (op1
)
6391 && CONST_INT_P (op2
)
6392 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
6393 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
6394 && HWI_COMPUTABLE_MODE_P (int_mode
))
6396 /* Extracting a bit-field from a constant */
6397 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
6398 HOST_WIDE_INT op1val
= INTVAL (op1
);
6399 HOST_WIDE_INT op2val
= INTVAL (op2
);
6400 if (!BITS_BIG_ENDIAN
)
6402 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
6403 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
6405 /* Not enough information to calculate the bit position. */
6408 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
6410 /* First zero-extend. */
6411 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
6412 /* If desired, propagate sign bit. */
6413 if (code
== SIGN_EXTRACT
6414 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
6416 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
6419 return gen_int_mode (val
, int_mode
);
6424 if (CONST_INT_P (op0
))
6425 return op0
!= const0_rtx
? op1
: op2
;
6427 /* Convert c ? a : a into "a". */
6428 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
6431 /* Convert a != b ? a : b into "a". */
6432 if (GET_CODE (op0
) == NE
6433 && ! side_effects_p (op0
)
6434 && ! HONOR_NANS (mode
)
6435 && ! HONOR_SIGNED_ZEROS (mode
)
6436 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
6437 && rtx_equal_p (XEXP (op0
, 1), op2
))
6438 || (rtx_equal_p (XEXP (op0
, 0), op2
)
6439 && rtx_equal_p (XEXP (op0
, 1), op1
))))
6442 /* Convert a == b ? a : b into "b". */
6443 if (GET_CODE (op0
) == EQ
6444 && ! side_effects_p (op0
)
6445 && ! HONOR_NANS (mode
)
6446 && ! HONOR_SIGNED_ZEROS (mode
)
6447 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
6448 && rtx_equal_p (XEXP (op0
, 1), op2
))
6449 || (rtx_equal_p (XEXP (op0
, 0), op2
)
6450 && rtx_equal_p (XEXP (op0
, 1), op1
))))
6453 /* Convert (!c) != {0,...,0} ? a : b into
6454 c != {0,...,0} ? b : a for vector modes. */
6455 if (VECTOR_MODE_P (GET_MODE (op1
))
6456 && GET_CODE (op0
) == NE
6457 && GET_CODE (XEXP (op0
, 0)) == NOT
6458 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
6460 rtx cv
= XEXP (op0
, 1);
6463 if (!CONST_VECTOR_NUNITS (cv
).is_constant (&nunits
))
6466 for (int i
= 0; i
< nunits
; ++i
)
6467 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
6474 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
6475 XEXP (XEXP (op0
, 0), 0),
6477 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
6482 /* Convert x == 0 ? N : clz (x) into clz (x) when
6483 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6484 Similarly for ctz (x). */
6485 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
6486 && XEXP (op0
, 1) == const0_rtx
)
6489 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
6495 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
6497 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
6498 ? GET_MODE (XEXP (op0
, 1))
6499 : GET_MODE (XEXP (op0
, 0)));
6502 /* Look for happy constants in op1 and op2. */
6503 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
6505 HOST_WIDE_INT t
= INTVAL (op1
);
6506 HOST_WIDE_INT f
= INTVAL (op2
);
6508 if (t
== STORE_FLAG_VALUE
&& f
== 0)
6509 code
= GET_CODE (op0
);
6510 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
6513 tmp
= reversed_comparison_code (op0
, NULL
);
6521 return simplify_gen_relational (code
, mode
, cmp_mode
,
6522 XEXP (op0
, 0), XEXP (op0
, 1));
6525 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
6526 cmp_mode
, XEXP (op0
, 0),
6529 /* See if any simplifications were possible. */
6532 if (CONST_INT_P (temp
))
6533 return temp
== const0_rtx
? op2
: op1
;
6535 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
6541 gcc_assert (GET_MODE (op0
) == mode
);
6542 gcc_assert (GET_MODE (op1
) == mode
);
6543 gcc_assert (VECTOR_MODE_P (mode
));
6544 trueop2
= avoid_constant_pool_reference (op2
);
6545 if (CONST_INT_P (trueop2
)
6546 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
6548 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
6549 unsigned HOST_WIDE_INT mask
;
6550 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
6553 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
6555 if (!(sel
& mask
) && !side_effects_p (op0
))
6557 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
6560 rtx trueop0
= avoid_constant_pool_reference (op0
);
6561 rtx trueop1
= avoid_constant_pool_reference (op1
);
6562 if (GET_CODE (trueop0
) == CONST_VECTOR
6563 && GET_CODE (trueop1
) == CONST_VECTOR
)
6565 rtvec v
= rtvec_alloc (n_elts
);
6568 for (i
= 0; i
< n_elts
; i
++)
6569 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
6570 ? CONST_VECTOR_ELT (trueop0
, i
)
6571 : CONST_VECTOR_ELT (trueop1
, i
));
6572 return gen_rtx_CONST_VECTOR (mode
, v
);
6575 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6576 if no element from a appears in the result. */
6577 if (GET_CODE (op0
) == VEC_MERGE
)
6579 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
6580 if (CONST_INT_P (tem
))
6582 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
6583 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
6584 return simplify_gen_ternary (code
, mode
, mode
,
6585 XEXP (op0
, 1), op1
, op2
);
6586 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
6587 return simplify_gen_ternary (code
, mode
, mode
,
6588 XEXP (op0
, 0), op1
, op2
);
6591 if (GET_CODE (op1
) == VEC_MERGE
)
6593 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
6594 if (CONST_INT_P (tem
))
6596 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
6597 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
6598 return simplify_gen_ternary (code
, mode
, mode
,
6599 op0
, XEXP (op1
, 1), op2
);
6600 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
6601 return simplify_gen_ternary (code
, mode
, mode
,
6602 op0
, XEXP (op1
, 0), op2
);
6606 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6608 if (GET_CODE (op0
) == VEC_DUPLICATE
6609 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
6610 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
6611 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0
, 0))), 1))
6613 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
6614 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
6616 if (XEXP (XEXP (op0
, 0), 0) == op1
6617 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
6621 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6623 with (vec_concat (X) (B)) if N == 1 or
6624 (vec_concat (A) (X)) if N == 2. */
6625 if (GET_CODE (op0
) == VEC_DUPLICATE
6626 && GET_CODE (op1
) == CONST_VECTOR
6627 && known_eq (CONST_VECTOR_NUNITS (op1
), 2)
6628 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6629 && IN_RANGE (sel
, 1, 2))
6631 rtx newop0
= XEXP (op0
, 0);
6632 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
6634 std::swap (newop0
, newop1
);
6635 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6637 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6638 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6639 Only applies for vectors of two elements. */
6640 if (GET_CODE (op0
) == VEC_DUPLICATE
6641 && GET_CODE (op1
) == VEC_CONCAT
6642 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6643 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6644 && IN_RANGE (sel
, 1, 2))
6646 rtx newop0
= XEXP (op0
, 0);
6647 rtx newop1
= XEXP (op1
, 2 - sel
);
6648 rtx otherop
= XEXP (op1
, sel
- 1);
6650 std::swap (newop0
, newop1
);
6651 /* Don't want to throw away the other part of the vec_concat if
6652 it has side-effects. */
6653 if (!side_effects_p (otherop
))
6654 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6659 (vec_merge:outer (vec_duplicate:outer x:inner)
6660 (subreg:outer y:inner 0)
6663 with (vec_concat:outer x:inner y:inner) if N == 1,
6664 or (vec_concat:outer y:inner x:inner) if N == 2.
6666 Implicitly, this means we have a paradoxical subreg, but such
6667 a check is cheap, so make it anyway.
6669 Only applies for vectors of two elements. */
6670 if (GET_CODE (op0
) == VEC_DUPLICATE
6671 && GET_CODE (op1
) == SUBREG
6672 && GET_MODE (op1
) == GET_MODE (op0
)
6673 && GET_MODE (SUBREG_REG (op1
)) == GET_MODE (XEXP (op0
, 0))
6674 && paradoxical_subreg_p (op1
)
6675 && subreg_lowpart_p (op1
)
6676 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6677 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6678 && IN_RANGE (sel
, 1, 2))
6680 rtx newop0
= XEXP (op0
, 0);
6681 rtx newop1
= SUBREG_REG (op1
);
6683 std::swap (newop0
, newop1
);
6684 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6687 /* Same as above but with switched operands:
6688 Replace (vec_merge:outer (subreg:outer x:inner 0)
6689 (vec_duplicate:outer y:inner)
6692 with (vec_concat:outer x:inner y:inner) if N == 1,
6693 or (vec_concat:outer y:inner x:inner) if N == 2. */
6694 if (GET_CODE (op1
) == VEC_DUPLICATE
6695 && GET_CODE (op0
) == SUBREG
6696 && GET_MODE (op0
) == GET_MODE (op1
)
6697 && GET_MODE (SUBREG_REG (op0
)) == GET_MODE (XEXP (op1
, 0))
6698 && paradoxical_subreg_p (op0
)
6699 && subreg_lowpart_p (op0
)
6700 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6701 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6702 && IN_RANGE (sel
, 1, 2))
6704 rtx newop0
= SUBREG_REG (op0
);
6705 rtx newop1
= XEXP (op1
, 0);
6707 std::swap (newop0
, newop1
);
6708 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6711 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6713 with (vec_concat x y) or (vec_concat y x) depending on value
6715 if (GET_CODE (op0
) == VEC_DUPLICATE
6716 && GET_CODE (op1
) == VEC_DUPLICATE
6717 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6718 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6719 && IN_RANGE (sel
, 1, 2))
6721 rtx newop0
= XEXP (op0
, 0);
6722 rtx newop1
= XEXP (op1
, 0);
6724 std::swap (newop0
, newop1
);
6726 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6730 if (rtx_equal_p (op0
, op1
)
6731 && !side_effects_p (op2
) && !side_effects_p (op1
))
6734 if (!side_effects_p (op2
))
6737 = may_trap_p (op0
) ? NULL_RTX
: simplify_merge_mask (op0
, op2
, 0);
6739 = may_trap_p (op1
) ? NULL_RTX
: simplify_merge_mask (op1
, op2
, 1);
6741 return simplify_gen_ternary (code
, mode
, mode
,
6743 top1
? top1
: op1
, op2
);
6755 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6756 starting at byte FIRST_BYTE. Return true on success and add the
6757 bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6758 that the bytes follow target memory order. Leave BYTES unmodified
6761 MODE is the mode of X. The caller must reserve NUM_BYTES bytes in
6762 BYTES before calling this function. */
6765 native_encode_rtx (machine_mode mode
, rtx x
, vec
<target_unit
> &bytes
,
6766 unsigned int first_byte
, unsigned int num_bytes
)
6768 /* Check the mode is sensible. */
6769 gcc_assert (GET_MODE (x
) == VOIDmode
6770 ? is_a
<scalar_int_mode
> (mode
)
6771 : mode
== GET_MODE (x
));
6773 if (GET_CODE (x
) == CONST_VECTOR
)
6775 /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6776 is necessary. The only complication is that MODE_VECTOR_BOOL
6777 vectors can have several elements per byte. */
6778 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
6779 GET_MODE_NUNITS (mode
));
6780 unsigned int elt
= first_byte
* BITS_PER_UNIT
/ elt_bits
;
6781 if (elt_bits
< BITS_PER_UNIT
)
6783 /* This is the only case in which elements can be smaller than
6785 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_BOOL
);
6786 for (unsigned int i
= 0; i
< num_bytes
; ++i
)
6788 target_unit value
= 0;
6789 for (unsigned int j
= 0; j
< BITS_PER_UNIT
; j
+= elt_bits
)
6791 value
|= (INTVAL (CONST_VECTOR_ELT (x
, elt
)) & 1) << j
;
6794 bytes
.quick_push (value
);
6799 unsigned int start
= bytes
.length ();
6800 unsigned int elt_bytes
= GET_MODE_UNIT_SIZE (mode
);
6801 /* Make FIRST_BYTE relative to ELT. */
6802 first_byte
%= elt_bytes
;
6803 while (num_bytes
> 0)
6805 /* Work out how many bytes we want from element ELT. */
6806 unsigned int chunk_bytes
= MIN (num_bytes
, elt_bytes
- first_byte
);
6807 if (!native_encode_rtx (GET_MODE_INNER (mode
),
6808 CONST_VECTOR_ELT (x
, elt
), bytes
,
6809 first_byte
, chunk_bytes
))
6811 bytes
.truncate (start
);
6816 num_bytes
-= chunk_bytes
;
6821 /* All subsequent cases are limited to scalars. */
6823 if (!is_a
<scalar_mode
> (mode
, &smode
))
6826 /* Make sure that the region is in range. */
6827 unsigned int end_byte
= first_byte
+ num_bytes
;
6828 unsigned int mode_bytes
= GET_MODE_SIZE (smode
);
6829 gcc_assert (end_byte
<= mode_bytes
);
6831 if (CONST_SCALAR_INT_P (x
))
6833 /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6834 and WORDS_BIG_ENDIAN. Use the subreg machinery to get the lsb
6835 position of each byte. */
6836 rtx_mode_t
value (x
, smode
);
6837 wide_int_ref
value_wi (value
);
6838 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6840 /* Always constant because the inputs are. */
6842 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
6843 /* Operate directly on the encoding rather than using
6844 wi::extract_uhwi, so that we preserve the sign or zero
6845 extension for modes that are not a whole number of bits in
6846 size. (Zero extension is only used for the combination of
6847 innermode == BImode && STORE_FLAG_VALUE == 1). */
6848 unsigned int elt
= lsb
/ HOST_BITS_PER_WIDE_INT
;
6849 unsigned int shift
= lsb
% HOST_BITS_PER_WIDE_INT
;
6850 unsigned HOST_WIDE_INT uhwi
= value_wi
.elt (elt
);
6851 bytes
.quick_push (uhwi
>> shift
);
6856 if (CONST_DOUBLE_P (x
))
6858 /* real_to_target produces an array of integers in target memory order.
6859 All integers before the last one have 32 bits; the last one may
6860 have 32 bits or fewer, depending on whether the mode bitsize
6861 is divisible by 32. Each of these integers is then laid out
6862 in target memory as any other integer would be. */
6863 long el32
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
6864 real_to_target (el32
, CONST_DOUBLE_REAL_VALUE (x
), smode
);
6866 /* The (maximum) number of target bytes per element of el32. */
6867 unsigned int bytes_per_el32
= 32 / BITS_PER_UNIT
;
6868 gcc_assert (bytes_per_el32
!= 0);
6870 /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6872 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6874 unsigned int index
= byte
/ bytes_per_el32
;
6875 unsigned int subbyte
= byte
% bytes_per_el32
;
6876 unsigned int int_bytes
= MIN (bytes_per_el32
,
6877 mode_bytes
- index
* bytes_per_el32
);
6878 /* Always constant because the inputs are. */
6880 = subreg_size_lsb (1, int_bytes
, subbyte
).to_constant ();
6881 bytes
.quick_push ((unsigned long) el32
[index
] >> lsb
);
6886 if (GET_CODE (x
) == CONST_FIXED
)
6888 for (unsigned int byte
= first_byte
; byte
< end_byte
; ++byte
)
6890 /* Always constant because the inputs are. */
6892 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
6893 unsigned HOST_WIDE_INT piece
= CONST_FIXED_VALUE_LOW (x
);
6894 if (lsb
>= HOST_BITS_PER_WIDE_INT
)
6896 lsb
-= HOST_BITS_PER_WIDE_INT
;
6897 piece
= CONST_FIXED_VALUE_HIGH (x
);
6899 bytes
.quick_push (piece
>> lsb
);
6907 /* Read a vector of mode MODE from the target memory image given by BYTES,
6908 starting at byte FIRST_BYTE. The vector is known to be encodable using
6909 NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
6910 and BYTES is known to have enough bytes to supply NPATTERNS *
6911 NELTS_PER_PATTERN vector elements. Each element of BYTES contains
6912 BITS_PER_UNIT bits and the bytes are in target memory order.
6914 Return the vector on success, otherwise return NULL_RTX. */
/* See the block comment above: build a CONST_VECTOR of mode MODE from
   the target-memory-order bytes in BYTES starting at FIRST_BYTE, using
   NPATTERNS interleaved patterns of NELTS_PER_PATTERN elements.
   NOTE(review): a few interior lines (brace/else lines and a null check
   after the recursive decode, around original lines 6926/6941/6945)
   were dropped by the extraction; comments describe only what is
   visible.  */
6917 native_decode_vector_rtx (machine_mode mode
, const vec
<target_unit
> &bytes
,
6918 unsigned int first_byte
, unsigned int npatterns
,
6919 unsigned int nelts_per_pattern
)
6921 rtx_vector_builder
builder (mode
, npatterns
, nelts_per_pattern
);
/* Bits per element; sub-byte elements only occur for MODE_VECTOR_BOOL
   (asserted below).  */
6923 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
6924 GET_MODE_NUNITS (mode
))
;
6925 if (elt_bits
< BITS_PER_UNIT
)
6927 /* This is the only case in which elements can be smaller than a byte.
6928 Element 0 is always in the lsb of the containing byte. */
6929 gcc_assert (GET_MODE_CLASS (mode
) == MODE_VECTOR_BOOL
)
;
/* Sub-byte path: extract each encoded element's bit from its
   containing byte and push a BImode 0/1 constant.  */
6930 for (unsigned int i
= 0; i
< builder
.encoded_nelts (); ++i
)
6932 unsigned int bit_index
= first_byte
* BITS_PER_UNIT
+ i
* elt_bits
;
6933 unsigned int byte_index
= bit_index
/ BITS_PER_UNIT
;
6934 unsigned int lsb
= bit_index
% BITS_PER_UNIT
;
6935 builder
.quick_push (bytes
[byte_index
] & (1 << lsb
)
6936 ? CONST1_RTX (BImode
)
6937 : CONST0_RTX (BImode
));
/* Whole-byte path: decode each encoded element recursively via
   native_decode_rtx and advance FIRST_BYTE by one element.  */
6942 for (unsigned int i
= 0; i
< builder
.encoded_nelts (); ++i
)
6944 rtx x
= native_decode_rtx (GET_MODE_INNER (mode
), bytes
, first_byte
)
;
6947 builder
.quick_push (x
)
;
6948 first_byte
+= elt_bits
/ BITS_PER_UNIT
;
/* Expand the encoded patterns into the final CONST_VECTOR.  */
6951 return builder
.build ();
6954 /* Read an rtx of mode MODE from the target memory image given by BYTES,
6955 starting at byte FIRST_BYTE. Each element of BYTES contains BITS_PER_UNIT
6956 bits and the bytes are in target memory order. The image has enough
6957 values to specify all bytes of MODE.
6959 Return the rtx on success, otherwise return NULL_RTX. */
6962 native_decode_rtx (machine_mode mode
, const vec
<target_unit
> &bytes
,
6963 unsigned int first_byte
)
6965 if (VECTOR_MODE_P (mode
))
6967 /* If we know at compile time how many elements there are,
6968 pull each element directly from BYTES. */
6970 if (GET_MODE_NUNITS (mode
).is_constant (&nelts
))
6971 return native_decode_vector_rtx (mode
, bytes
, first_byte
, nelts
, 1);
6975 scalar_int_mode imode
;
6976 if (is_a
<scalar_int_mode
> (mode
, &imode
)
6977 && GET_MODE_PRECISION (imode
) <= MAX_BITSIZE_MODE_ANY_INT
)
6979 /* Pull the bytes msb first, so that we can use simple
6980 shift-and-insert wide_int operations. */
6981 unsigned int size
= GET_MODE_SIZE (imode
);
6982 wide_int
result (wi::zero (GET_MODE_PRECISION (imode
)));
6983 for (unsigned int i
= 0; i
< size
; ++i
)
6985 unsigned int lsb
= (size
- i
- 1) * BITS_PER_UNIT
;
6986 /* Always constant because the inputs are. */
6987 unsigned int subbyte
6988 = subreg_size_offset_from_lsb (1, size
, lsb
).to_constant ();
6989 result
<<= BITS_PER_UNIT
;
6990 result
|= bytes
[first_byte
+ subbyte
];
6992 return immed_wide_int_const (result
, imode
);
6995 scalar_float_mode fmode
;
6996 if (is_a
<scalar_float_mode
> (mode
, &fmode
))
6998 /* We need to build an array of integers in target memory order.
6999 All integers before the last one have 32 bits; the last one may
7000 have 32 bits or fewer, depending on whether the mode bitsize
7001 is divisible by 32. */
7002 long el32
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
7003 unsigned int num_el32
= CEIL (GET_MODE_BITSIZE (fmode
), 32);
7004 memset (el32
, 0, num_el32
* sizeof (long));
7006 /* The (maximum) number of target bytes per element of el32. */
7007 unsigned int bytes_per_el32
= 32 / BITS_PER_UNIT
;
7008 gcc_assert (bytes_per_el32
!= 0);
7010 unsigned int mode_bytes
= GET_MODE_SIZE (fmode
);
7011 for (unsigned int byte
= 0; byte
< mode_bytes
; ++byte
)
7013 unsigned int index
= byte
/ bytes_per_el32
;
7014 unsigned int subbyte
= byte
% bytes_per_el32
;
7015 unsigned int int_bytes
= MIN (bytes_per_el32
,
7016 mode_bytes
- index
* bytes_per_el32
);
7017 /* Always constant because the inputs are. */
7019 = subreg_size_lsb (1, int_bytes
, subbyte
).to_constant ();
7020 el32
[index
] |= (unsigned long) bytes
[first_byte
+ byte
] << lsb
;
7023 real_from_target (&r
, el32
, fmode
);
7024 return const_double_from_real_value (r
, fmode
);
7027 if (ALL_SCALAR_FIXED_POINT_MODE_P (mode
))
7029 scalar_mode smode
= as_a
<scalar_mode
> (mode
);
7035 unsigned int mode_bytes
= GET_MODE_SIZE (smode
);
7036 for (unsigned int byte
= 0; byte
< mode_bytes
; ++byte
)
7038 /* Always constant because the inputs are. */
7040 = subreg_size_lsb (1, mode_bytes
, byte
).to_constant ();
7041 unsigned HOST_WIDE_INT unit
= bytes
[first_byte
+ byte
];
7042 if (lsb
>= HOST_BITS_PER_WIDE_INT
)
7043 f
.data
.high
|= unit
<< (lsb
- HOST_BITS_PER_WIDE_INT
);
7045 f
.data
.low
|= unit
<< lsb
;
7047 return CONST_FIXED_FROM_FIXED_VALUE (f
, mode
);
7053 /* Simplify a byte offset BYTE into CONST_VECTOR X. The main purpose
7054 is to convert a runtime BYTE value into a constant one. */
7057 simplify_const_vector_byte_offset (rtx x
, poly_uint64 byte
)
7059 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7060 machine_mode mode
= GET_MODE (x
);
7061 unsigned int elt_bits
= vector_element_size (GET_MODE_BITSIZE (mode
),
7062 GET_MODE_NUNITS (mode
));
7063 /* The number of bits needed to encode one element from each pattern. */
7064 unsigned int sequence_bits
= CONST_VECTOR_NPATTERNS (x
) * elt_bits
;
7066 /* Identify the start point in terms of a sequence number and a byte offset
7067 within that sequence. */
7068 poly_uint64 first_sequence
;
7069 unsigned HOST_WIDE_INT subbit
;
7070 if (can_div_trunc_p (byte
* BITS_PER_UNIT
, sequence_bits
,
7071 &first_sequence
, &subbit
))
7073 unsigned int nelts_per_pattern
= CONST_VECTOR_NELTS_PER_PATTERN (x
);
7074 if (nelts_per_pattern
== 1)
7075 /* This is a duplicated vector, so the value of FIRST_SEQUENCE
7077 byte
= subbit
/ BITS_PER_UNIT
;
7078 else if (nelts_per_pattern
== 2 && known_gt (first_sequence
, 0U))
7080 /* The subreg drops the first element from each pattern and
7081 only uses the second element. Find the first sequence
7082 that starts on a byte boundary. */
7083 subbit
+= least_common_multiple (sequence_bits
, BITS_PER_UNIT
);
7084 byte
= subbit
/ BITS_PER_UNIT
;
7090 /* Subroutine of simplify_subreg in which:
7092 - X is known to be a CONST_VECTOR
7093 - OUTERMODE is known to be a vector mode
7095 Try to handle the subreg by operating on the CONST_VECTOR encoding
7096 rather than on each individual element of the CONST_VECTOR.
7098 Return the simplified subreg on success, otherwise return NULL_RTX. */
7101 simplify_const_vector_subreg (machine_mode outermode
, rtx x
,
7102 machine_mode innermode
, unsigned int first_byte
)
7104 /* Paradoxical subregs of vectors have dubious semantics. */
7105 if (paradoxical_subreg_p (outermode
, innermode
))
7108 /* We can only preserve the semantics of a stepped pattern if the new
7109 vector element is the same as the original one. */
7110 if (CONST_VECTOR_STEPPED_P (x
)
7111 && GET_MODE_INNER (outermode
) != GET_MODE_INNER (innermode
))
7114 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7115 unsigned int x_elt_bits
7116 = vector_element_size (GET_MODE_BITSIZE (innermode
),
7117 GET_MODE_NUNITS (innermode
));
7118 unsigned int out_elt_bits
7119 = vector_element_size (GET_MODE_BITSIZE (outermode
),
7120 GET_MODE_NUNITS (outermode
));
7122 /* The number of bits needed to encode one element from every pattern
7123 of the original vector. */
7124 unsigned int x_sequence_bits
= CONST_VECTOR_NPATTERNS (x
) * x_elt_bits
;
7126 /* The number of bits needed to encode one element from every pattern
7128 unsigned int out_sequence_bits
7129 = least_common_multiple (x_sequence_bits
, out_elt_bits
);
7131 /* Work out the number of interleaved patterns in the output vector
7132 and the number of encoded elements per pattern. */
7133 unsigned int out_npatterns
= out_sequence_bits
/ out_elt_bits
;
7134 unsigned int nelts_per_pattern
= CONST_VECTOR_NELTS_PER_PATTERN (x
);
7136 /* The encoding scheme requires the number of elements to be a multiple
7137 of the number of patterns, so that each pattern appears at least once
7138 and so that the same number of elements appear from each pattern. */
7139 bool ok_p
= multiple_p (GET_MODE_NUNITS (outermode
), out_npatterns
);
7140 unsigned int const_nunits
;
7141 if (GET_MODE_NUNITS (outermode
).is_constant (&const_nunits
)
7142 && (!ok_p
|| out_npatterns
* nelts_per_pattern
> const_nunits
))
7144 /* Either the encoding is invalid, or applying it would give us
7145 more elements than we need. Just encode each element directly. */
7146 out_npatterns
= const_nunits
;
7147 nelts_per_pattern
= 1;
7152 /* Get enough bytes of X to form the new encoding. */
7153 unsigned int buffer_bits
= out_npatterns
* nelts_per_pattern
* out_elt_bits
;
7154 unsigned int buffer_bytes
= CEIL (buffer_bits
, BITS_PER_UNIT
);
7155 auto_vec
<target_unit
, 128> buffer (buffer_bytes
);
7156 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, buffer_bytes
))
7159 /* Reencode the bytes as OUTERMODE. */
7160 return native_decode_vector_rtx (outermode
, buffer
, 0, out_npatterns
,
7164 /* Try to simplify a subreg of a constant by encoding the subreg region
7165 as a sequence of target bytes and reading them back in the new mode.
7166 Return the new value on success, otherwise return null.
7168 The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
7169 and byte offset FIRST_BYTE. */
7172 simplify_immed_subreg (fixed_size_mode outermode
, rtx x
,
7173 machine_mode innermode
, unsigned int first_byte
)
7175 unsigned int buffer_bytes
= GET_MODE_SIZE (outermode
);
7176 auto_vec
<target_unit
, 128> buffer (buffer_bytes
);
7178 /* Some ports misuse CCmode. */
7179 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (x
))
7182 /* Paradoxical subregs read undefined values for bytes outside of the
7183 inner value. However, we have traditionally always sign-extended
7184 integer constants and zero-extended others. */
7185 unsigned int inner_bytes
= buffer_bytes
;
7186 if (paradoxical_subreg_p (outermode
, innermode
))
7188 if (!GET_MODE_SIZE (innermode
).is_constant (&inner_bytes
))
7191 target_unit filler
= 0;
7192 if (CONST_SCALAR_INT_P (x
) && wi::neg_p (rtx_mode_t (x
, innermode
)))
7195 /* Add any leading bytes due to big-endian layout. The number of
7196 bytes must be constant because both modes have constant size. */
7197 unsigned int leading_bytes
7198 = -byte_lowpart_offset (outermode
, innermode
).to_constant ();
7199 for (unsigned int i
= 0; i
< leading_bytes
; ++i
)
7200 buffer
.quick_push (filler
);
7202 if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, inner_bytes
))
7205 /* Add any trailing bytes due to little-endian layout. */
7206 while (buffer
.length () < buffer_bytes
)
7207 buffer
.quick_push (filler
);
7209 else if (!native_encode_rtx (innermode
, x
, buffer
, first_byte
, inner_bytes
))
7211 rtx ret
= native_decode_rtx (outermode
, buffer
, 0);
7212 if (ret
&& MODE_COMPOSITE_P (outermode
))
7214 auto_vec
<target_unit
, 128> buffer2 (buffer_bytes
);
7215 if (!native_encode_rtx (outermode
, ret
, buffer2
, 0, buffer_bytes
))
7217 for (unsigned int i
= 0; i
< buffer_bytes
; ++i
)
7218 if (buffer
[i
] != buffer2
[i
])
7224 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
7225 Return 0 if no simplifications are possible. */
7227 simplify_context::simplify_subreg (machine_mode outermode
, rtx op
,
7228 machine_mode innermode
, poly_uint64 byte
)
7230 /* Little bit of sanity checking. */
7231 gcc_assert (innermode
!= VOIDmode
);
7232 gcc_assert (outermode
!= VOIDmode
);
7233 gcc_assert (innermode
!= BLKmode
);
7234 gcc_assert (outermode
!= BLKmode
);
7236 gcc_assert (GET_MODE (op
) == innermode
7237 || GET_MODE (op
) == VOIDmode
);
7239 poly_uint64 outersize
= GET_MODE_SIZE (outermode
);
7240 if (!multiple_p (byte
, outersize
))
7243 poly_uint64 innersize
= GET_MODE_SIZE (innermode
);
7244 if (maybe_ge (byte
, innersize
))
7247 if (outermode
== innermode
&& known_eq (byte
, 0U))
7250 if (GET_CODE (op
) == CONST_VECTOR
)
7251 byte
= simplify_const_vector_byte_offset (op
, byte
);
7253 if (multiple_p (byte
, GET_MODE_UNIT_SIZE (innermode
)))
7257 if (VECTOR_MODE_P (outermode
)
7258 && GET_MODE_INNER (outermode
) == GET_MODE_INNER (innermode
)
7259 && vec_duplicate_p (op
, &elt
))
7260 return gen_vec_duplicate (outermode
, elt
);
7262 if (outermode
== GET_MODE_INNER (innermode
)
7263 && vec_duplicate_p (op
, &elt
))
7267 if (CONST_SCALAR_INT_P (op
)
7268 || CONST_DOUBLE_AS_FLOAT_P (op
)
7269 || CONST_FIXED_P (op
)
7270 || GET_CODE (op
) == CONST_VECTOR
)
7272 unsigned HOST_WIDE_INT cbyte
;
7273 if (byte
.is_constant (&cbyte
))
7275 if (GET_CODE (op
) == CONST_VECTOR
&& VECTOR_MODE_P (outermode
))
7277 rtx tmp
= simplify_const_vector_subreg (outermode
, op
,
7283 fixed_size_mode fs_outermode
;
7284 if (is_a
<fixed_size_mode
> (outermode
, &fs_outermode
))
7285 return simplify_immed_subreg (fs_outermode
, op
, innermode
, cbyte
);
7289 /* Changing mode twice with SUBREG => just change it once,
7290 or not at all if changing back op starting mode. */
7291 if (GET_CODE (op
) == SUBREG
)
7293 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
7294 poly_uint64 innermostsize
= GET_MODE_SIZE (innermostmode
);
7297 if (outermode
== innermostmode
7298 && known_eq (byte
, 0U)
7299 && known_eq (SUBREG_BYTE (op
), 0))
7300 return SUBREG_REG (op
);
7302 /* Work out the memory offset of the final OUTERMODE value relative
7303 to the inner value of OP. */
7304 poly_int64 mem_offset
= subreg_memory_offset (outermode
,
7306 poly_int64 op_mem_offset
= subreg_memory_offset (op
);
7307 poly_int64 final_offset
= mem_offset
+ op_mem_offset
;
7309 /* See whether resulting subreg will be paradoxical. */
7310 if (!paradoxical_subreg_p (outermode
, innermostmode
))
7312 /* Bail out in case resulting subreg would be incorrect. */
7313 if (maybe_lt (final_offset
, 0)
7314 || maybe_ge (poly_uint64 (final_offset
), innermostsize
)
7315 || !multiple_p (final_offset
, outersize
))
7320 poly_int64 required_offset
= subreg_memory_offset (outermode
,
7322 if (maybe_ne (final_offset
, required_offset
))
7324 /* Paradoxical subregs always have byte offset 0. */
7328 /* Recurse for further possible simplifications. */
7329 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
7333 if (validate_subreg (outermode
, innermostmode
,
7334 SUBREG_REG (op
), final_offset
))
7336 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
7337 if (SUBREG_PROMOTED_VAR_P (op
)
7338 && SUBREG_PROMOTED_SIGN (op
) >= 0
7339 && GET_MODE_CLASS (outermode
) == MODE_INT
7340 && known_ge (outersize
, innersize
)
7341 && known_le (outersize
, innermostsize
)
7342 && subreg_lowpart_p (newx
))
7344 SUBREG_PROMOTED_VAR_P (newx
) = 1;
7345 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
7352 /* SUBREG of a hard register => just change the register number
7353 and/or mode. If the hard register is not valid in that mode,
7354 suppress this simplification. If the hard register is the stack,
7355 frame, or argument pointer, leave this as a SUBREG. */
7357 if (REG_P (op
) && HARD_REGISTER_P (op
))
7359 unsigned int regno
, final_regno
;
7362 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
7363 if (HARD_REGISTER_NUM_P (final_regno
))
7365 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
,
7366 subreg_memory_offset (outermode
,
7369 /* Propagate original regno. We don't have any way to specify
7370 the offset inside original regno, so do so only for lowpart.
7371 The information is used only by alias analysis that cannot
7372 grog partial register anyway. */
7374 if (known_eq (subreg_lowpart_offset (outermode
, innermode
), byte
))
7375 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
7380 /* If we have a SUBREG of a register that we are replacing and we are
7381 replacing it with a MEM, make a new MEM and try replacing the
7382 SUBREG with it. Don't do this if the MEM has a mode-dependent address
7383 or if we would be widening it. */
7386 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
7387 /* Allow splitting of volatile memory references in case we don't
7388 have instruction to move the whole thing. */
7389 && (! MEM_VOLATILE_P (op
)
7390 || ! have_insn_for (SET
, innermode
))
7391 && !(STRICT_ALIGNMENT
&& MEM_ALIGN (op
) < GET_MODE_ALIGNMENT (outermode
))
7392 && known_le (outersize
, innersize
))
7393 return adjust_address_nv (op
, outermode
, byte
);
7395 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
7397 if (GET_CODE (op
) == CONCAT
7398 || GET_CODE (op
) == VEC_CONCAT
)
7400 poly_uint64 final_offset
;
7403 machine_mode part_mode
= GET_MODE (XEXP (op
, 0));
7404 if (part_mode
== VOIDmode
)
7405 part_mode
= GET_MODE_INNER (GET_MODE (op
));
7406 poly_uint64 part_size
= GET_MODE_SIZE (part_mode
);
7407 if (known_lt (byte
, part_size
))
7409 part
= XEXP (op
, 0);
7410 final_offset
= byte
;
7412 else if (known_ge (byte
, part_size
))
7414 part
= XEXP (op
, 1);
7415 final_offset
= byte
- part_size
;
7420 if (maybe_gt (final_offset
+ outersize
, part_size
))
7423 part_mode
= GET_MODE (part
);
7424 if (part_mode
== VOIDmode
)
7425 part_mode
= GET_MODE_INNER (GET_MODE (op
));
7426 res
= simplify_subreg (outermode
, part
, part_mode
, final_offset
);
7429 if (validate_subreg (outermode
, part_mode
, part
, final_offset
))
7430 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
7435 (subreg (vec_merge (X)
7437 (const_int ((1 << N) | M)))
7438 (N * sizeof (outermode)))
7440 (subreg (X) (N * sizeof (outermode)))
7443 if (constant_multiple_p (byte
, GET_MODE_SIZE (outermode
), &idx
)
7444 && idx
< HOST_BITS_PER_WIDE_INT
7445 && GET_CODE (op
) == VEC_MERGE
7446 && GET_MODE_INNER (innermode
) == outermode
7447 && CONST_INT_P (XEXP (op
, 2))
7448 && (UINTVAL (XEXP (op
, 2)) & (HOST_WIDE_INT_1U
<< idx
)) != 0)
7449 return simplify_gen_subreg (outermode
, XEXP (op
, 0), innermode
, byte
);
7451 /* A SUBREG resulting from a zero extension may fold to zero if
7452 it extracts higher bits that the ZERO_EXTEND's source bits. */
7453 if (GET_CODE (op
) == ZERO_EXTEND
&& SCALAR_INT_MODE_P (innermode
))
7455 poly_uint64 bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
7456 if (known_ge (bitpos
, GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))))
7457 return CONST0_RTX (outermode
);
7460 scalar_int_mode int_outermode
, int_innermode
;
7461 if (is_a
<scalar_int_mode
> (outermode
, &int_outermode
)
7462 && is_a
<scalar_int_mode
> (innermode
, &int_innermode
)
7463 && known_eq (byte
, subreg_lowpart_offset (int_outermode
, int_innermode
)))
7465 /* Handle polynomial integers. The upper bits of a paradoxical
7466 subreg are undefined, so this is safe regardless of whether
7467 we're truncating or extending. */
7468 if (CONST_POLY_INT_P (op
))
7471 = poly_wide_int::from (const_poly_int_value (op
),
7472 GET_MODE_PRECISION (int_outermode
),
7474 return immed_wide_int_const (val
, int_outermode
);
7477 if (GET_MODE_PRECISION (int_outermode
)
7478 < GET_MODE_PRECISION (int_innermode
))
7480 rtx tem
= simplify_truncation (int_outermode
, op
, int_innermode
);
7486 /* If OP is a vector comparison and the subreg is not changing the
7487 number of elements or the size of the elements, change the result
7488 of the comparison to the new mode. */
7489 if (COMPARISON_P (op
)
7490 && VECTOR_MODE_P (outermode
)
7491 && VECTOR_MODE_P (innermode
)
7492 && known_eq (GET_MODE_NUNITS (outermode
), GET_MODE_NUNITS (innermode
))
7493 && known_eq (GET_MODE_UNIT_SIZE (outermode
),
7494 GET_MODE_UNIT_SIZE (innermode
)))
7495 return simplify_gen_relational (GET_CODE (op
), outermode
, innermode
,
7496 XEXP (op
, 0), XEXP (op
, 1));
7500 /* Make a SUBREG operation or equivalent if it folds. */
7503 simplify_context::simplify_gen_subreg (machine_mode outermode
, rtx op
,
7504 machine_mode innermode
,
7509 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
7513 if (GET_CODE (op
) == SUBREG
7514 || GET_CODE (op
) == CONCAT
7515 || GET_MODE (op
) == VOIDmode
)
7518 if (MODE_COMPOSITE_P (outermode
)
7519 && (CONST_SCALAR_INT_P (op
)
7520 || CONST_DOUBLE_AS_FLOAT_P (op
)
7521 || CONST_FIXED_P (op
)
7522 || GET_CODE (op
) == CONST_VECTOR
))
7525 if (validate_subreg (outermode
, innermode
, op
, byte
))
7526 return gen_rtx_SUBREG (outermode
, op
, byte
);
7531 /* Generates a subreg to get the least significant part of EXPR (in mode
7532 INNER_MODE) to OUTER_MODE. */
7535 simplify_context::lowpart_subreg (machine_mode outer_mode
, rtx expr
,
7536 machine_mode inner_mode
)
7538 return simplify_gen_subreg (outer_mode
, expr
, inner_mode
,
7539 subreg_lowpart_offset (outer_mode
, inner_mode
));
7542 /* Simplify X, an rtx expression.
7544 Return the simplified expression or NULL if no simplifications
7547 This is the preferred entry point into the simplification routines;
7548 however, we still allow passes to call the more specific routines.
7550 Right now GCC has three (yes, three) major bodies of RTL simplification
7551 code that need to be unified.
7553 1. fold_rtx in cse.c. This code uses various CSE specific
7554 information to aid in RTL simplification.
7556 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
7557 it uses combine specific information to aid in RTL
7560 3. The routines in this file.
7563 Long term we want to only have one body of simplification code; to
7564 get to that state I recommend the following steps:
7566 1. Pour over fold_rtx & simplify_rtx and move any simplifications
7567 which are not pass dependent state into these routines.
7569 2. As code is moved by #1, change fold_rtx & simplify_rtx to
7570 use this routine whenever possible.
7572 3. Allow for pass dependent state to be provided to these
7573 routines and add simplifications based on the pass dependent
7574 state. Remove code from cse.c & combine.c that becomes
7577 It will take time, but ultimately the compiler will be easier to
7578 maintain and improve. It's totally silly that when we add a
7579 simplification that it needs to be added to 4 places (3 for RTL
7580 simplification and 1 for tree simplification. */
7583 simplify_rtx (const_rtx x
)
7585 const enum rtx_code code
= GET_CODE (x
);
7586 const machine_mode mode
= GET_MODE (x
);
7588 switch (GET_RTX_CLASS (code
))
7591 return simplify_unary_operation (code
, mode
,
7592 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
7593 case RTX_COMM_ARITH
:
7594 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
7595 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
7600 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
7603 case RTX_BITFIELD_OPS
:
7604 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
7605 XEXP (x
, 0), XEXP (x
, 1),
7609 case RTX_COMM_COMPARE
:
7610 return simplify_relational_operation (code
, mode
,
7611 ((GET_MODE (XEXP (x
, 0))
7613 ? GET_MODE (XEXP (x
, 0))
7614 : GET_MODE (XEXP (x
, 1))),
7620 return simplify_subreg (mode
, SUBREG_REG (x
),
7621 GET_MODE (SUBREG_REG (x
)),
7628 /* Convert (lo_sum (high FOO) FOO) to FOO. */
7629 if (GET_CODE (XEXP (x
, 0)) == HIGH
7630 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))
7643 namespace selftest
{
7645 /* Make a unique pseudo REG of mode MODE for use by selftests. */
7648 make_test_reg (machine_mode mode
)
7650 static int test_reg_num
= LAST_VIRTUAL_REGISTER
+ 1;
7652 return gen_rtx_REG (mode
, test_reg_num
++);
7656 test_scalar_int_ops (machine_mode mode
)
7658 rtx op0
= make_test_reg (mode
);
7659 rtx op1
= make_test_reg (mode
);
7660 rtx six
= GEN_INT (6);
7662 rtx neg_op0
= simplify_gen_unary (NEG
, mode
, op0
, mode
);
7663 rtx not_op0
= simplify_gen_unary (NOT
, mode
, op0
, mode
);
7664 rtx bswap_op0
= simplify_gen_unary (BSWAP
, mode
, op0
, mode
);
7666 rtx and_op0_op1
= simplify_gen_binary (AND
, mode
, op0
, op1
);
7667 rtx ior_op0_op1
= simplify_gen_binary (IOR
, mode
, op0
, op1
);
7668 rtx xor_op0_op1
= simplify_gen_binary (XOR
, mode
, op0
, op1
);
7670 rtx and_op0_6
= simplify_gen_binary (AND
, mode
, op0
, six
);
7671 rtx and_op1_6
= simplify_gen_binary (AND
, mode
, op1
, six
);
7673 /* Test some binary identities. */
7674 ASSERT_RTX_EQ (op0
, simplify_gen_binary (PLUS
, mode
, op0
, const0_rtx
));
7675 ASSERT_RTX_EQ (op0
, simplify_gen_binary (PLUS
, mode
, const0_rtx
, op0
));
7676 ASSERT_RTX_EQ (op0
, simplify_gen_binary (MINUS
, mode
, op0
, const0_rtx
));
7677 ASSERT_RTX_EQ (op0
, simplify_gen_binary (MULT
, mode
, op0
, const1_rtx
));
7678 ASSERT_RTX_EQ (op0
, simplify_gen_binary (MULT
, mode
, const1_rtx
, op0
));
7679 ASSERT_RTX_EQ (op0
, simplify_gen_binary (DIV
, mode
, op0
, const1_rtx
));
7680 ASSERT_RTX_EQ (op0
, simplify_gen_binary (AND
, mode
, op0
, constm1_rtx
));
7681 ASSERT_RTX_EQ (op0
, simplify_gen_binary (AND
, mode
, constm1_rtx
, op0
));
7682 ASSERT_RTX_EQ (op0
, simplify_gen_binary (IOR
, mode
, op0
, const0_rtx
));
7683 ASSERT_RTX_EQ (op0
, simplify_gen_binary (IOR
, mode
, const0_rtx
, op0
));
7684 ASSERT_RTX_EQ (op0
, simplify_gen_binary (XOR
, mode
, op0
, const0_rtx
));
7685 ASSERT_RTX_EQ (op0
, simplify_gen_binary (XOR
, mode
, const0_rtx
, op0
));
7686 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ASHIFT
, mode
, op0
, const0_rtx
));
7687 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ROTATE
, mode
, op0
, const0_rtx
));
7688 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ASHIFTRT
, mode
, op0
, const0_rtx
));
7689 ASSERT_RTX_EQ (op0
, simplify_gen_binary (LSHIFTRT
, mode
, op0
, const0_rtx
));
7690 ASSERT_RTX_EQ (op0
, simplify_gen_binary (ROTATERT
, mode
, op0
, const0_rtx
));
7692 /* Test some self-inverse operations. */
7693 ASSERT_RTX_EQ (op0
, simplify_gen_unary (NEG
, mode
, neg_op0
, mode
));
7694 ASSERT_RTX_EQ (op0
, simplify_gen_unary (NOT
, mode
, not_op0
, mode
));
7695 ASSERT_RTX_EQ (op0
, simplify_gen_unary (BSWAP
, mode
, bswap_op0
, mode
));
7697 /* Test some reflexive operations. */
7698 ASSERT_RTX_EQ (op0
, simplify_gen_binary (AND
, mode
, op0
, op0
));
7699 ASSERT_RTX_EQ (op0
, simplify_gen_binary (IOR
, mode
, op0
, op0
));
7700 ASSERT_RTX_EQ (op0
, simplify_gen_binary (SMIN
, mode
, op0
, op0
));
7701 ASSERT_RTX_EQ (op0
, simplify_gen_binary (SMAX
, mode
, op0
, op0
));
7702 ASSERT_RTX_EQ (op0
, simplify_gen_binary (UMIN
, mode
, op0
, op0
));
7703 ASSERT_RTX_EQ (op0
, simplify_gen_binary (UMAX
, mode
, op0
, op0
));
7705 ASSERT_RTX_EQ (const0_rtx
, simplify_gen_binary (MINUS
, mode
, op0
, op0
));
7706 ASSERT_RTX_EQ (const0_rtx
, simplify_gen_binary (XOR
, mode
, op0
, op0
));
7708 /* Test simplify_distributive_operation. */
7709 ASSERT_RTX_EQ (simplify_gen_binary (AND
, mode
, xor_op0_op1
, six
),
7710 simplify_gen_binary (XOR
, mode
, and_op0_6
, and_op1_6
));
7711 ASSERT_RTX_EQ (simplify_gen_binary (AND
, mode
, ior_op0_op1
, six
),
7712 simplify_gen_binary (IOR
, mode
, and_op0_6
, and_op1_6
));
7713 ASSERT_RTX_EQ (simplify_gen_binary (AND
, mode
, and_op0_op1
, six
),
7714 simplify_gen_binary (AND
, mode
, and_op0_6
, and_op1_6
));
7716 /* Test useless extensions are eliminated. */
7717 ASSERT_RTX_EQ (op0
, simplify_gen_unary (TRUNCATE
, mode
, op0
, mode
));
7718 ASSERT_RTX_EQ (op0
, simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, mode
));
7719 ASSERT_RTX_EQ (op0
, simplify_gen_unary (SIGN_EXTEND
, mode
, op0
, mode
));
7720 ASSERT_RTX_EQ (op0
, lowpart_subreg (mode
, op0
, mode
));
7723 /* Verify some simplifications of integer extension/truncation.
7724 Machine mode BMODE is the guaranteed wider than SMODE. */
7727 test_scalar_int_ext_ops (machine_mode bmode
, machine_mode smode
)
7729 rtx sreg
= make_test_reg (smode
);
7731 /* Check truncation of extension. */
7732 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7733 simplify_gen_unary (ZERO_EXTEND
, bmode
,
7737 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7738 simplify_gen_unary (SIGN_EXTEND
, bmode
,
7742 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7743 lowpart_subreg (bmode
, sreg
, smode
),
7748 /* Verify more simplifications of integer extension/truncation.
7749 BMODE is wider than MMODE which is wider than SMODE. */
7752 test_scalar_int_ext_ops2 (machine_mode bmode
, machine_mode mmode
,
7755 rtx breg
= make_test_reg (bmode
);
7756 rtx mreg
= make_test_reg (mmode
);
7757 rtx sreg
= make_test_reg (smode
);
7759 /* Check truncate of truncate. */
7760 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7761 simplify_gen_unary (TRUNCATE
, mmode
,
7764 simplify_gen_unary (TRUNCATE
, smode
, breg
, bmode
));
7766 /* Check extension of extension. */
7767 ASSERT_RTX_EQ (simplify_gen_unary (ZERO_EXTEND
, bmode
,
7768 simplify_gen_unary (ZERO_EXTEND
, mmode
,
7771 simplify_gen_unary (ZERO_EXTEND
, bmode
, sreg
, smode
));
7772 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND
, bmode
,
7773 simplify_gen_unary (SIGN_EXTEND
, mmode
,
7776 simplify_gen_unary (SIGN_EXTEND
, bmode
, sreg
, smode
));
7777 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND
, bmode
,
7778 simplify_gen_unary (ZERO_EXTEND
, mmode
,
7781 simplify_gen_unary (ZERO_EXTEND
, bmode
, sreg
, smode
));
7783 /* Check truncation of extension. */
7784 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7785 simplify_gen_unary (ZERO_EXTEND
, bmode
,
7788 simplify_gen_unary (TRUNCATE
, smode
, mreg
, mmode
));
7789 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7790 simplify_gen_unary (SIGN_EXTEND
, bmode
,
7793 simplify_gen_unary (TRUNCATE
, smode
, mreg
, mmode
));
7794 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE
, smode
,
7795 lowpart_subreg (bmode
, mreg
, mmode
),
7797 simplify_gen_unary (TRUNCATE
, smode
, mreg
, mmode
));
7801 /* Verify some simplifications involving scalar expressions. */
7806 for (unsigned int i
= 0; i
< NUM_MACHINE_MODES
; ++i
)
7808 machine_mode mode
= (machine_mode
) i
;
7809 if (SCALAR_INT_MODE_P (mode
) && mode
!= BImode
)
7810 test_scalar_int_ops (mode
);
7813 test_scalar_int_ext_ops (HImode
, QImode
);
7814 test_scalar_int_ext_ops (SImode
, QImode
);
7815 test_scalar_int_ext_ops (SImode
, HImode
);
7816 test_scalar_int_ext_ops (DImode
, QImode
);
7817 test_scalar_int_ext_ops (DImode
, HImode
);
7818 test_scalar_int_ext_ops (DImode
, SImode
);
7820 test_scalar_int_ext_ops2 (SImode
, HImode
, QImode
);
7821 test_scalar_int_ext_ops2 (DImode
, HImode
, QImode
);
7822 test_scalar_int_ext_ops2 (DImode
, SImode
, QImode
);
7823 test_scalar_int_ext_ops2 (DImode
, SImode
, HImode
);
7826 /* Test vector simplifications involving VEC_DUPLICATE in which the
7827 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7828 register that holds one element of MODE. */
7831 test_vector_ops_duplicate (machine_mode mode
, rtx scalar_reg
)
7833 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
7834 rtx duplicate
= gen_rtx_VEC_DUPLICATE (mode
, scalar_reg
);
7835 poly_uint64 nunits
= GET_MODE_NUNITS (mode
);
7836 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
7838 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
7839 rtx not_scalar_reg
= gen_rtx_NOT (inner_mode
, scalar_reg
);
7840 rtx duplicate_not
= gen_rtx_VEC_DUPLICATE (mode
, not_scalar_reg
);
7841 ASSERT_RTX_EQ (duplicate
,
7842 simplify_unary_operation (NOT
, mode
,
7843 duplicate_not
, mode
));
7845 rtx neg_scalar_reg
= gen_rtx_NEG (inner_mode
, scalar_reg
);
7846 rtx duplicate_neg
= gen_rtx_VEC_DUPLICATE (mode
, neg_scalar_reg
);
7847 ASSERT_RTX_EQ (duplicate
,
7848 simplify_unary_operation (NEG
, mode
,
7849 duplicate_neg
, mode
));
7851 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
7852 ASSERT_RTX_EQ (duplicate
,
7853 simplify_binary_operation (PLUS
, mode
, duplicate
,
7854 CONST0_RTX (mode
)));
7856 ASSERT_RTX_EQ (duplicate
,
7857 simplify_binary_operation (MINUS
, mode
, duplicate
,
7858 CONST0_RTX (mode
)));
7860 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode
),
7861 simplify_binary_operation (MINUS
, mode
, duplicate
,
7865 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
7866 rtx zero_par
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, const0_rtx
));
7867 ASSERT_RTX_PTR_EQ (scalar_reg
,
7868 simplify_binary_operation (VEC_SELECT
, inner_mode
,
7869 duplicate
, zero_par
));
7871 unsigned HOST_WIDE_INT const_nunits
;
7872 if (nunits
.is_constant (&const_nunits
))
7874 /* And again with the final element. */
7875 rtx last_index
= gen_int_mode (const_nunits
- 1, word_mode
);
7876 rtx last_par
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, last_index
));
7877 ASSERT_RTX_PTR_EQ (scalar_reg
,
7878 simplify_binary_operation (VEC_SELECT
, inner_mode
,
7879 duplicate
, last_par
));
7881 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
7882 rtx vector_reg
= make_test_reg (mode
);
7883 for (unsigned HOST_WIDE_INT i
= 0; i
< const_nunits
; i
++)
7885 if (i
>= HOST_BITS_PER_WIDE_INT
)
7887 rtx mask
= GEN_INT ((HOST_WIDE_INT_1U
<< i
) | (i
+ 1));
7888 rtx vm
= gen_rtx_VEC_MERGE (mode
, duplicate
, vector_reg
, mask
);
7889 poly_uint64 offset
= i
* GET_MODE_SIZE (inner_mode
);
7890 ASSERT_RTX_EQ (scalar_reg
,
7891 simplify_gen_subreg (inner_mode
, vm
,
7896 /* Test a scalar subreg of a VEC_DUPLICATE. */
7897 poly_uint64 offset
= subreg_lowpart_offset (inner_mode
, mode
);
7898 ASSERT_RTX_EQ (scalar_reg
,
7899 simplify_gen_subreg (inner_mode
, duplicate
,
7902 machine_mode narrower_mode
;
7903 if (maybe_ne (nunits
, 2U)
7904 && multiple_p (nunits
, 2)
7905 && mode_for_vector (inner_mode
, 2).exists (&narrower_mode
)
7906 && VECTOR_MODE_P (narrower_mode
))
7908 /* Test VEC_DUPLICATE of a vector. */
7909 rtx_vector_builder
nbuilder (narrower_mode
, 2, 1);
7910 nbuilder
.quick_push (const0_rtx
);
7911 nbuilder
.quick_push (const1_rtx
);
7912 rtx_vector_builder
builder (mode
, 2, 1);
7913 builder
.quick_push (const0_rtx
);
7914 builder
.quick_push (const1_rtx
);
7915 ASSERT_RTX_EQ (builder
.build (),
7916 simplify_unary_operation (VEC_DUPLICATE
, mode
,
7920 /* Test VEC_SELECT of a vector. */
7922 = gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, const1_rtx
, const0_rtx
));
7923 rtx narrower_duplicate
7924 = gen_rtx_VEC_DUPLICATE (narrower_mode
, scalar_reg
);
7925 ASSERT_RTX_EQ (narrower_duplicate
,
7926 simplify_binary_operation (VEC_SELECT
, narrower_mode
,
7927 duplicate
, vec_par
));
7929 /* Test a vector subreg of a VEC_DUPLICATE. */
7930 poly_uint64 offset
= subreg_lowpart_offset (narrower_mode
, mode
);
7931 ASSERT_RTX_EQ (narrower_duplicate
,
7932 simplify_gen_subreg (narrower_mode
, duplicate
,
7937 /* Test vector simplifications involving VEC_SERIES in which the
7938 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7939 register that holds one element of MODE. */
7942 test_vector_ops_series (machine_mode mode
, rtx scalar_reg
)
7944 /* Test unary cases with VEC_SERIES arguments. */
7945 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
7946 rtx duplicate
= gen_rtx_VEC_DUPLICATE (mode
, scalar_reg
);
7947 rtx neg_scalar_reg
= gen_rtx_NEG (inner_mode
, scalar_reg
);
7948 rtx series_0_r
= gen_rtx_VEC_SERIES (mode
, const0_rtx
, scalar_reg
);
7949 rtx series_0_nr
= gen_rtx_VEC_SERIES (mode
, const0_rtx
, neg_scalar_reg
);
7950 rtx series_nr_1
= gen_rtx_VEC_SERIES (mode
, neg_scalar_reg
, const1_rtx
);
7951 rtx series_r_m1
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, constm1_rtx
);
7952 rtx series_r_r
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, scalar_reg
);
7953 rtx series_nr_nr
= gen_rtx_VEC_SERIES (mode
, neg_scalar_reg
,
7955 ASSERT_RTX_EQ (series_0_r
,
7956 simplify_unary_operation (NEG
, mode
, series_0_nr
, mode
));
7957 ASSERT_RTX_EQ (series_r_m1
,
7958 simplify_unary_operation (NEG
, mode
, series_nr_1
, mode
));
7959 ASSERT_RTX_EQ (series_r_r
,
7960 simplify_unary_operation (NEG
, mode
, series_nr_nr
, mode
));
7962 /* Test that a VEC_SERIES with a zero step is simplified away. */
7963 ASSERT_RTX_EQ (duplicate
,
7964 simplify_binary_operation (VEC_SERIES
, mode
,
7965 scalar_reg
, const0_rtx
));
7967 /* Test PLUS and MINUS with VEC_SERIES. */
7968 rtx series_0_1
= gen_const_vec_series (mode
, const0_rtx
, const1_rtx
);
7969 rtx series_0_m1
= gen_const_vec_series (mode
, const0_rtx
, constm1_rtx
);
7970 rtx series_r_1
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, const1_rtx
);
7971 ASSERT_RTX_EQ (series_r_r
,
7972 simplify_binary_operation (PLUS
, mode
, series_0_r
,
7974 ASSERT_RTX_EQ (series_r_1
,
7975 simplify_binary_operation (PLUS
, mode
, duplicate
,
7977 ASSERT_RTX_EQ (series_r_m1
,
7978 simplify_binary_operation (PLUS
, mode
, duplicate
,
7980 ASSERT_RTX_EQ (series_0_r
,
7981 simplify_binary_operation (MINUS
, mode
, series_r_r
,
7983 ASSERT_RTX_EQ (series_r_m1
,
7984 simplify_binary_operation (MINUS
, mode
, duplicate
,
7986 ASSERT_RTX_EQ (series_r_1
,
7987 simplify_binary_operation (MINUS
, mode
, duplicate
,
7989 ASSERT_RTX_EQ (series_0_m1
,
7990 simplify_binary_operation (VEC_SERIES
, mode
, const0_rtx
,
7993 /* Test NEG on constant vector series. */
7994 ASSERT_RTX_EQ (series_0_m1
,
7995 simplify_unary_operation (NEG
, mode
, series_0_1
, mode
));
7996 ASSERT_RTX_EQ (series_0_1
,
7997 simplify_unary_operation (NEG
, mode
, series_0_m1
, mode
));
7999 /* Test PLUS and MINUS on constant vector series. */
8000 rtx scalar2
= gen_int_mode (2, inner_mode
);
8001 rtx scalar3
= gen_int_mode (3, inner_mode
);
8002 rtx series_1_1
= gen_const_vec_series (mode
, const1_rtx
, const1_rtx
);
8003 rtx series_0_2
= gen_const_vec_series (mode
, const0_rtx
, scalar2
);
8004 rtx series_1_3
= gen_const_vec_series (mode
, const1_rtx
, scalar3
);
8005 ASSERT_RTX_EQ (series_1_1
,
8006 simplify_binary_operation (PLUS
, mode
, series_0_1
,
8007 CONST1_RTX (mode
)));
8008 ASSERT_RTX_EQ (series_0_m1
,
8009 simplify_binary_operation (PLUS
, mode
, CONST0_RTX (mode
),
8011 ASSERT_RTX_EQ (series_1_3
,
8012 simplify_binary_operation (PLUS
, mode
, series_1_1
,
8014 ASSERT_RTX_EQ (series_0_1
,
8015 simplify_binary_operation (MINUS
, mode
, series_1_1
,
8016 CONST1_RTX (mode
)));
8017 ASSERT_RTX_EQ (series_1_1
,
8018 simplify_binary_operation (MINUS
, mode
, CONST1_RTX (mode
),
8020 ASSERT_RTX_EQ (series_1_1
,
8021 simplify_binary_operation (MINUS
, mode
, series_1_3
,
8024 /* Test MULT between constant vectors. */
8025 rtx vec2
= gen_const_vec_duplicate (mode
, scalar2
);
8026 rtx vec3
= gen_const_vec_duplicate (mode
, scalar3
);
8027 rtx scalar9
= gen_int_mode (9, inner_mode
);
8028 rtx series_3_9
= gen_const_vec_series (mode
, scalar3
, scalar9
);
8029 ASSERT_RTX_EQ (series_0_2
,
8030 simplify_binary_operation (MULT
, mode
, series_0_1
, vec2
));
8031 ASSERT_RTX_EQ (series_3_9
,
8032 simplify_binary_operation (MULT
, mode
, vec3
, series_1_3
));
8033 if (!GET_MODE_NUNITS (mode
).is_constant ())
8034 ASSERT_FALSE (simplify_binary_operation (MULT
, mode
, series_0_1
,
8037 /* Test ASHIFT between constant vectors. */
8038 ASSERT_RTX_EQ (series_0_2
,
8039 simplify_binary_operation (ASHIFT
, mode
, series_0_1
,
8040 CONST1_RTX (mode
)));
8041 if (!GET_MODE_NUNITS (mode
).is_constant ())
8042 ASSERT_FALSE (simplify_binary_operation (ASHIFT
, mode
, CONST1_RTX (mode
),
8047 simplify_merge_mask (rtx x
, rtx mask
, int op
)
8049 return simplify_context ().simplify_merge_mask (x
, mask
, op
);
8052 /* Verify simplify_merge_mask works correctly. */
8055 test_vec_merge (machine_mode mode
)
8057 rtx op0
= make_test_reg (mode
);
8058 rtx op1
= make_test_reg (mode
);
8059 rtx op2
= make_test_reg (mode
);
8060 rtx op3
= make_test_reg (mode
);
8061 rtx op4
= make_test_reg (mode
);
8062 rtx op5
= make_test_reg (mode
);
8063 rtx mask1
= make_test_reg (SImode
);
8064 rtx mask2
= make_test_reg (SImode
);
8065 rtx vm1
= gen_rtx_VEC_MERGE (mode
, op0
, op1
, mask1
);
8066 rtx vm2
= gen_rtx_VEC_MERGE (mode
, op2
, op3
, mask1
);
8067 rtx vm3
= gen_rtx_VEC_MERGE (mode
, op4
, op5
, mask1
);
8069 /* Simple vec_merge. */
8070 ASSERT_EQ (op0
, simplify_merge_mask (vm1
, mask1
, 0));
8071 ASSERT_EQ (op1
, simplify_merge_mask (vm1
, mask1
, 1));
8072 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (vm1
, mask2
, 0));
8073 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (vm1
, mask2
, 1));
8075 /* Nested vec_merge.
8076 It's tempting to make this simplify right down to opN, but we don't
8077 because all the simplify_* functions assume that the operands have
8078 already been simplified. */
8079 rtx nvm
= gen_rtx_VEC_MERGE (mode
, vm1
, vm2
, mask1
);
8080 ASSERT_EQ (vm1
, simplify_merge_mask (nvm
, mask1
, 0));
8081 ASSERT_EQ (vm2
, simplify_merge_mask (nvm
, mask1
, 1));
8083 /* Intermediate unary op. */
8084 rtx unop
= gen_rtx_NOT (mode
, vm1
);
8085 ASSERT_RTX_EQ (gen_rtx_NOT (mode
, op0
),
8086 simplify_merge_mask (unop
, mask1
, 0));
8087 ASSERT_RTX_EQ (gen_rtx_NOT (mode
, op1
),
8088 simplify_merge_mask (unop
, mask1
, 1));
8090 /* Intermediate binary op. */
8091 rtx binop
= gen_rtx_PLUS (mode
, vm1
, vm2
);
8092 ASSERT_RTX_EQ (gen_rtx_PLUS (mode
, op0
, op2
),
8093 simplify_merge_mask (binop
, mask1
, 0));
8094 ASSERT_RTX_EQ (gen_rtx_PLUS (mode
, op1
, op3
),
8095 simplify_merge_mask (binop
, mask1
, 1));
8097 /* Intermediate ternary op. */
8098 rtx tenop
= gen_rtx_FMA (mode
, vm1
, vm2
, vm3
);
8099 ASSERT_RTX_EQ (gen_rtx_FMA (mode
, op0
, op2
, op4
),
8100 simplify_merge_mask (tenop
, mask1
, 0));
8101 ASSERT_RTX_EQ (gen_rtx_FMA (mode
, op1
, op3
, op5
),
8102 simplify_merge_mask (tenop
, mask1
, 1));
8105 rtx badop0
= gen_rtx_PRE_INC (mode
, op0
);
8106 rtx badvm
= gen_rtx_VEC_MERGE (mode
, badop0
, op1
, mask1
);
8107 ASSERT_EQ (badop0
, simplify_merge_mask (badvm
, mask1
, 0));
8108 ASSERT_EQ (NULL_RTX
, simplify_merge_mask (badvm
, mask1
, 1));
8110 /* Called indirectly. */
8111 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode
, op0
, op3
, mask1
),
8112 simplify_rtx (nvm
));
8115 /* Test subregs of integer vector constant X, trying elements in
8116 the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
8117 where NELTS is the number of elements in X. Subregs involving
8118 elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail. */
8121 test_vector_subregs_modes (rtx x
, poly_uint64 elt_bias
= 0,
8122 unsigned int first_valid
= 0)
8124 machine_mode inner_mode
= GET_MODE (x
);
8125 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
8127 for (unsigned int modei
= 0; modei
< NUM_MACHINE_MODES
; ++modei
)
8129 machine_mode outer_mode
= (machine_mode
) modei
;
8130 if (!VECTOR_MODE_P (outer_mode
))
8133 unsigned int outer_nunits
;
8134 if (GET_MODE_INNER (outer_mode
) == int_mode
8135 && GET_MODE_NUNITS (outer_mode
).is_constant (&outer_nunits
)
8136 && multiple_p (GET_MODE_NUNITS (inner_mode
), outer_nunits
))
8138 /* Test subregs in which the outer mode is a smaller,
8139 constant-sized vector of the same element type. */
8141 = constant_lower_bound (GET_MODE_NUNITS (inner_mode
));
8142 for (unsigned int elt
= 0; elt
< limit
; elt
+= outer_nunits
)
8144 rtx expected
= NULL_RTX
;
8145 if (elt
>= first_valid
)
8147 rtx_vector_builder
builder (outer_mode
, outer_nunits
, 1);
8148 for (unsigned int i
= 0; i
< outer_nunits
; ++i
)
8149 builder
.quick_push (CONST_VECTOR_ELT (x
, elt
+ i
));
8150 expected
= builder
.build ();
8152 poly_uint64 byte
= (elt_bias
+ elt
) * GET_MODE_SIZE (int_mode
);
8153 ASSERT_RTX_EQ (expected
,
8154 simplify_subreg (outer_mode
, x
,
8158 else if (known_eq (GET_MODE_SIZE (outer_mode
),
8159 GET_MODE_SIZE (inner_mode
))
8160 && known_eq (elt_bias
, 0U)
8161 && (GET_MODE_CLASS (outer_mode
) != MODE_VECTOR_BOOL
8162 || known_eq (GET_MODE_BITSIZE (outer_mode
),
8163 GET_MODE_NUNITS (outer_mode
)))
8164 && (!FLOAT_MODE_P (outer_mode
)
8165 || (FLOAT_MODE_FORMAT (outer_mode
)->ieee_bits
8166 == GET_MODE_UNIT_PRECISION (outer_mode
)))
8167 && (GET_MODE_SIZE (inner_mode
).is_constant ()
8168 || !CONST_VECTOR_STEPPED_P (x
)))
8170 /* Try converting to OUTER_MODE and back. */
8171 rtx outer_x
= simplify_subreg (outer_mode
, x
, inner_mode
, 0);
8172 ASSERT_TRUE (outer_x
!= NULL_RTX
);
8173 ASSERT_RTX_EQ (x
, simplify_subreg (inner_mode
, outer_x
,
8178 if (BYTES_BIG_ENDIAN
== WORDS_BIG_ENDIAN
)
8180 /* Test each byte in the element range. */
8182 = constant_lower_bound (GET_MODE_SIZE (inner_mode
));
8183 for (unsigned int i
= 0; i
< limit
; ++i
)
8185 unsigned int elt
= i
/ GET_MODE_SIZE (int_mode
);
8186 rtx expected
= NULL_RTX
;
8187 if (elt
>= first_valid
)
8189 unsigned int byte_shift
= i
% GET_MODE_SIZE (int_mode
);
8190 if (BYTES_BIG_ENDIAN
)
8191 byte_shift
= GET_MODE_SIZE (int_mode
) - byte_shift
- 1;
8192 rtx_mode_t
vec_elt (CONST_VECTOR_ELT (x
, elt
), int_mode
);
8193 wide_int shifted_elt
8194 = wi::lrshift (vec_elt
, byte_shift
* BITS_PER_UNIT
);
8195 expected
= immed_wide_int_const (shifted_elt
, QImode
);
8197 poly_uint64 byte
= elt_bias
* GET_MODE_SIZE (int_mode
) + i
;
8198 ASSERT_RTX_EQ (expected
,
8199 simplify_subreg (QImode
, x
, inner_mode
, byte
));
8204 /* Test constant subregs of integer vector mode INNER_MODE, using 1
8205 element per pattern. */
8208 test_vector_subregs_repeating (machine_mode inner_mode
)
8210 poly_uint64 nunits
= GET_MODE_NUNITS (inner_mode
);
8211 unsigned int min_nunits
= constant_lower_bound (nunits
);
8212 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
8213 unsigned int count
= gcd (min_nunits
, 8);
8215 rtx_vector_builder
builder (inner_mode
, count
, 1);
8216 for (unsigned int i
= 0; i
< count
; ++i
)
8217 builder
.quick_push (gen_int_mode (8 - i
, int_mode
));
8218 rtx x
= builder
.build ();
8220 test_vector_subregs_modes (x
);
8221 if (!nunits
.is_constant ())
8222 test_vector_subregs_modes (x
, nunits
- min_nunits
);
8225 /* Test constant subregs of integer vector mode INNER_MODE, using 2
8226 elements per pattern. */
8229 test_vector_subregs_fore_back (machine_mode inner_mode
)
8231 poly_uint64 nunits
= GET_MODE_NUNITS (inner_mode
);
8232 unsigned int min_nunits
= constant_lower_bound (nunits
);
8233 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
8234 unsigned int count
= gcd (min_nunits
, 4);
8236 rtx_vector_builder
builder (inner_mode
, count
, 2);
8237 for (unsigned int i
= 0; i
< count
; ++i
)
8238 builder
.quick_push (gen_int_mode (i
, int_mode
));
8239 for (unsigned int i
= 0; i
< count
; ++i
)
8240 builder
.quick_push (gen_int_mode (-(int) i
, int_mode
));
8241 rtx x
= builder
.build ();
8243 test_vector_subregs_modes (x
);
8244 if (!nunits
.is_constant ())
8245 test_vector_subregs_modes (x
, nunits
- min_nunits
, count
);
8248 /* Test constant subregs of integer vector mode INNER_MODE, using 3
8249 elements per pattern. */
8252 test_vector_subregs_stepped (machine_mode inner_mode
)
8254 /* Build { 0, 1, 2, 3, ... }. */
8255 scalar_mode int_mode
= GET_MODE_INNER (inner_mode
);
8256 rtx_vector_builder
builder (inner_mode
, 1, 3);
8257 for (unsigned int i
= 0; i
< 3; ++i
)
8258 builder
.quick_push (gen_int_mode (i
, int_mode
));
8259 rtx x
= builder
.build ();
8261 test_vector_subregs_modes (x
);
8264 /* Test constant subregs of integer vector mode INNER_MODE. */
8267 test_vector_subregs (machine_mode inner_mode
)
8269 test_vector_subregs_repeating (inner_mode
);
8270 test_vector_subregs_fore_back (inner_mode
);
8271 test_vector_subregs_stepped (inner_mode
);
8274 /* Verify some simplifications involving vectors. */
8279 for (unsigned int i
= 0; i
< NUM_MACHINE_MODES
; ++i
)
8281 machine_mode mode
= (machine_mode
) i
;
8282 if (VECTOR_MODE_P (mode
))
8284 rtx scalar_reg
= make_test_reg (GET_MODE_INNER (mode
));
8285 test_vector_ops_duplicate (mode
, scalar_reg
);
8286 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
8287 && maybe_gt (GET_MODE_NUNITS (mode
), 2))
8289 test_vector_ops_series (mode
, scalar_reg
);
8290 test_vector_subregs (mode
);
8292 test_vec_merge (mode
);
/* Dispatch type for the CONST_POLY_INT tests: the primary template
   runs them, while the <1> specialization is a no-op, since a single
   coefficient means poly_ints degenerate to ordinary constants.  */

template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};
8309 /* Test various CONST_POLY_INT properties. */
8311 template<unsigned int N
>
8313 simplify_const_poly_int_tests
<N
>::run ()
8315 rtx x1
= gen_int_mode (poly_int64 (1, 1), QImode
);
8316 rtx x2
= gen_int_mode (poly_int64 (-80, 127), QImode
);
8317 rtx x3
= gen_int_mode (poly_int64 (-79, -128), QImode
);
8318 rtx x4
= gen_int_mode (poly_int64 (5, 4), QImode
);
8319 rtx x5
= gen_int_mode (poly_int64 (30, 24), QImode
);
8320 rtx x6
= gen_int_mode (poly_int64 (20, 16), QImode
);
8321 rtx x7
= gen_int_mode (poly_int64 (7, 4), QImode
);
8322 rtx x8
= gen_int_mode (poly_int64 (30, 24), HImode
);
8323 rtx x9
= gen_int_mode (poly_int64 (-30, -24), HImode
);
8324 rtx x10
= gen_int_mode (poly_int64 (-31, -24), HImode
);
8325 rtx two
= GEN_INT (2);
8326 rtx six
= GEN_INT (6);
8327 poly_uint64 offset
= subreg_lowpart_offset (QImode
, HImode
);
8329 /* These tests only try limited operation combinations. Fuller arithmetic
8330 testing is done directly on poly_ints. */
8331 ASSERT_EQ (simplify_unary_operation (NEG
, HImode
, x8
, HImode
), x9
);
8332 ASSERT_EQ (simplify_unary_operation (NOT
, HImode
, x8
, HImode
), x10
);
8333 ASSERT_EQ (simplify_unary_operation (TRUNCATE
, QImode
, x8
, HImode
), x5
);
8334 ASSERT_EQ (simplify_binary_operation (PLUS
, QImode
, x1
, x2
), x3
);
8335 ASSERT_EQ (simplify_binary_operation (MINUS
, QImode
, x3
, x1
), x2
);
8336 ASSERT_EQ (simplify_binary_operation (MULT
, QImode
, x4
, six
), x5
);
8337 ASSERT_EQ (simplify_binary_operation (MULT
, QImode
, six
, x4
), x5
);
8338 ASSERT_EQ (simplify_binary_operation (ASHIFT
, QImode
, x4
, two
), x6
);
8339 ASSERT_EQ (simplify_binary_operation (IOR
, QImode
, x4
, two
), x7
);
8340 ASSERT_EQ (simplify_subreg (HImode
, x5
, QImode
, 0), x8
);
8341 ASSERT_EQ (simplify_subreg (QImode
, x8
, HImode
, offset
), x5
);
8344 /* Run all of the selftests within this file. */
8347 simplify_rtx_c_tests ()
8351 simplify_const_poly_int_tests
<NUM_POLY_INT_COEFFS
>::run ();
8354 } // namespace selftest
8356 #endif /* CHECKING_P */