/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "diagnostic-core.h"
#include "selftest-rtl.h"
#include "rtx-vector-builder.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
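/* For example, HWI_SIGN_EXTEND applied to a low word whose top bit is set
   (such as HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)) yields
   HOST_WIDE_INT_M1 (an all-ones high word), while HWI_SIGN_EXTEND (1)
   yields HOST_WIDE_INT_0.  */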
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (!HWI_COMPUTABLE_MODE_P (mode)
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
					   mode);
  return gen_int_mode (val, mode);
}
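/* Illustrative note: the wide-int path above matters when MODE is wider than
   a HOST_WIDE_INT and I is the most negative HOST_WIDE_INT value, where
   -UINTVAL (I) wraps back to UINTVAL (I) and the negated value cannot be
   represented correctly by gen_int_mode alone.  */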
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
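/* For example, the SImode sign-bit constant is (const_int -2147483648)
   (0x80000000 after masking with the SImode mask), so mode_signbit_p
   returns true for that constant and false for any other SImode value.  */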
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
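/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, (reg:SI R))
   first tries constant folding and, failing that, canonicalizes the
   commutative operands so the result is (plus:SI (reg:SI R) (const_int 1)).  */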
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
	return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
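/* For example, a load such as (mem/u/c:DF (symbol_ref ".LC0")) whose pool
   entry holds a CONST_DOUBLE is replaced by that CONST_DOUBLE, provided the
   access mode and offset match the pooled constant.  */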
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      poly_int64 offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep);
	    if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
		|| !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
		|| (toffset && !poly_int_tree_p (toffset, &toffset_val)))
	      decl = NULL;
	    else
	      offset += bytepos + toffset_val;
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && VAR_P (decl)
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
	      poly_int64 n_offset, o_offset;

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      n = strip_offset (n, &n_offset);
	      o = strip_offset (o, &o_offset);
	      if (!(known_eq (o_offset, n_offset + offset)
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && known_eq (offset, 0))
	    x = newx;
	}
    }

  return x;
}
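/* Illustrative sketch: if X is a MEM whose MEM_EXPR is a TREE_STATIC
   VAR_DECL "v" whose DECL_RTL is itself a MEM, and X's known offset is 4,
   the code above rewrites X via adjust_address_nv so that it addresses
   "v" plus 4 directly.  The exact RTL depends on the target.  */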
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
					 op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
					    op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
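/* For example, replacing (reg:SI R) with (const_int 8) in
   (plus:SI (reg:SI R) (const_int 4)) yields (const_int 12), because the
   rebuilt PLUS is re-simplified by simplify_gen_binary.  */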
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (const_int Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
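/* Illustration: if TRULY_NOOP_TRUNCATION_MODES_P (SImode, DImode) holds,
   simplify_gen_unary (TRUNCATE, SImode, (reg:DI X), DImode) may come back
   as the lowpart (subreg:SI (reg:DI X) 0); otherwise an explicit
   (truncate:SI (reg:DI X)) is kept.  */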
static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
      && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
	  /* If doing this transform works for an X with all bits set,
	     it works for any X.  */
	  && ((GET_MODE_MASK (mode) >> shift) & mask)
	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
	{
	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
	  return simplify_gen_binary (AND, mode, op0, mask_op);
	}
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    {
	      pos -= op_precision - precision;
	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					   XEXP (op, 1), GEN_INT (pos));
	    }
	}
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					 XEXP (op, 1), XEXP (op, 2));
	}
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
	return simplify_gen_unary (TRUNCATE, int_mode, inner,
				   GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
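/* For example, with QImode narrower than half of SImode,
   (truncate:QI (lshiftrt:SI (sign_extend:SI (reg:QI X)) (const_int 2)))
   is rewritten above to (ashiftrt:QI (reg:QI X) (const_int 2)).  */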
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
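/* For example, simplify_unary_operation (NEG, SImode, (const_int 5), SImode)
   folds through simplify_const_unary_operation to (const_int -5); a
   non-constant operand instead falls through to simplify_unary_operation_1.  */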
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
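/* For example, (float:DF (reg:SI X)) is always exact because any 32-bit
   value fits in DFmode's 53-bit significand, whereas (float:SF (reg:DI X))
   is only known exact if enough sign-bit copies or trailing zero bits can
   be proved for X.  */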
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
	 and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return simplify_gen_relational (GE, int_mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    std::swap (in1, in2);

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
	 If comparison is not reversible use
	 x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
	{
	  rtx cond = XEXP (op, 0);
	  rtx true_rtx = XEXP (op, 1);
	  rtx false_rtx = XEXP (op, 2);

	  if ((GET_CODE (true_rtx) == NEG
	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	       || (GET_CODE (false_rtx) == NEG
		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
	    {
	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
		temp = reversed_comparison (cond, mode);
	      else
		{
		  temp = cond;
		  std::swap (true_rtx, false_rtx);
		}
	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
					   mode, temp, true_rtx, false_rtx);
	    }
	}

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
	{
	  int_mode = as_a <scalar_int_mode> (mode);
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  gen_int_shift_amount (inner,
								isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  gen_int_shift_amount (inner,
								isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	}

      if (vec_series_p (op, &base, &step))
	{
	  /* Only create a new series if we can simplify both parts.  In other
	     cases this isn't really a simplification, and it's not necessarily
	     a win to replace a vector operation with a scalar operation.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
	  if (base)
	    {
	      step = simplify_unary_operation (NEG, inner_mode,
					       step, inner_mode);
	      if (step)
		return gen_vec_series (mode, base, step);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (known_eq (GET_MODE_NUNITS (mode), 1)
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_UNIT_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	  && (flag_unsafe_math_optimizations
	      || exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	      && exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && (num_sign_bit_copies (op, int_mode)
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_NEG (int_mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  gcc_assert (GET_MODE_PRECISION (int_mode)
		      > GET_MODE_PRECISION (op_mode));
	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   int_mode, inner, tmode);
	    }
	}

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
	 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (op, 1) != const0_rtx)
	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, int_mode,
					   inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
				     op0_mode);
	}

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    default:
      break;
    }

  if (VECTOR_MODE_P (mode)
      && vec_duplicate_p (op, &elt)
      && code != VEC_DUPLICATE)
    {
      /* Try applying the operator to ELT and see if that simplifies.
	 We can duplicate the result if so.

	 The reason we don't use simplify_gen_unary is that it isn't
	 necessarily a win to convert things like:

	   (neg:V (vec_duplicate:V (reg:S R)))

	 to:

	   (vec_duplicate:V (neg:S (reg:S R)))

	 The first might be done entirely in vector registers while the
	 second might need a move between register files.  */
      temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
				       elt, GET_MODE_INNER (GET_MODE (op)));
      if (temp)
	return gen_vec_duplicate (mode, temp);
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
				rtx op, machine_mode op_mode)
{
  scalar_int_mode result_mode;

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
	return gen_const_vec_duplicate (mode, op);
      if (GET_CODE (op) == CONST_VECTOR
	  && (CONST_VECTOR_DUPLICATE_P (op)
	      || CONST_VECTOR_NUNITS (op).is_constant ()))
	{
	  unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
				    ? CONST_VECTOR_NPATTERNS (op)
				    : CONST_VECTOR_NUNITS (op).to_constant ());
	  gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
	  rtx_vector_builder builder (mode, npatterns, 1);
	  for (unsigned i = 0; i < npatterns; i++)
	    builder.quick_push (CONST_VECTOR_ELT (op, i));
	  return builder.build ();
	}
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (op) == CONST_VECTOR
      && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
    {
      gcc_assert (GET_MODE (op) == op_mode);

      rtx_vector_builder builder;
      if (!builder.new_unary_operation (mode, op, false))
	return 0;

      unsigned int count = builder.encoded_nelts ();
      for (unsigned int i = 0; i < count; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (op_mode));
	  if (!x || !valid_for_const_vector_p (mode, x))
	    return 0;
	  builder.quick_push (x);
	}
      return builder.build ();
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      wide_int result;
      scalar_int_mode imode = (op_mode == VOIDmode
			       ? result_mode
			       : as_a <scalar_int_mode> (op_mode));
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert happens,
	 you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), result_mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), result_mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), result_mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), result_mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	case SQRT:
	default:
	  return 0;
	}

      return immed_wide_int_const (result, result_mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the ABI of real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }
  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
	{
	case NEG:
	  result = -const_poly_int_value (op);
	  break;

	case NOT:
	  result = ~const_poly_int_value (op);
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return 0;
}
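/* Illustrative examples of the constant folding above (not part of the
   original source): (neg:SI (const_int 5)) folds to (const_int -5) via
   wi::neg, and (popcount:SI (const_int 0xf0)) folds to (const_int 4)
   via wi::popcount.  */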
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
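/* Illustrative example (not part of the original source): in SImode,
   (and (bswap:SI x) (const_int 0xff)) becomes
   (bswap:SI (and:SI x (const_int 0xff000000))), because byte-swapping
   the constant 0xff gives 0xff000000; both forms extract the most
   significant byte of X into the least significant byte of the result.  */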
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
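/* Illustrative example (not part of the original source): for
   (plus (plus x (const_int 3)) y) the "(x op c) op y" rule above
   rebuilds it as (plus (plus x y) (const_int 3)), so constants bubble
   out to the outermost, second operand position.  */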
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
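/* Usage sketch (illustrative, not part of the original source): a caller
   that wants "x + 0" folded in SImode can write

     rtx folded = simplify_binary_operation (PLUS, SImode, x, const0_rtx);

   and gets X back, since integer modes never honor signed zeros.  A null
   return means no simplification was found; the caller then builds the
   expression itself, typically via simplify_gen_binary, which falls back
   to gen_rtx_fmt_ee when nothing simplifies.  */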
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

static rtx
simplify_binary_operation_series (rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
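/* Illustrative example (not part of the original source): adding
   (vec_series (const_int 1) (const_int 2)) to
   (vec_duplicate (const_int 10)) folds base 1+10 and step 2+0, giving
   (vec_series (const_int 11) (const_int 2)); element-wise that is
   {1,3,5,...} + {10,10,...} = {11,13,15,...}.  */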
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;
  poly_int64 offset;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, offset);
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && poly_int_rtx_p (op0, &offset))
	return plus_constant (mode, op1, offset);
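      /* Illustrative example (not part of the original source):
	 (plus:SI (symbol_ref "foo") (const_int 8)) is handled by
	 plus_constant above and becomes
	 (const:SI (plus:SI (symbol_ref "foo") (const_int 8))), keeping
	 the relocatable sum in a form assemblers can emit.  */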
2275 /* See if this is something like X * C - X or vice versa or
2276 if the multiplication is written as a shift. If so, we can
2277 distribute and make a new multiply, shift, or maybe just
2278 have X (if C is 2 in the example above). But don't make
2279 something more expensive than we had before. */
2281 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2283 rtx lhs
= op0
, rhs
= op1
;
2285 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2286 wide_int coeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2288 if (GET_CODE (lhs
) == NEG
)
2290 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2291 lhs
= XEXP (lhs
, 0);
2293 else if (GET_CODE (lhs
) == MULT
2294 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2296 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2297 lhs
= XEXP (lhs
, 0);
2299 else if (GET_CODE (lhs
) == ASHIFT
2300 && CONST_INT_P (XEXP (lhs
, 1))
2301 && INTVAL (XEXP (lhs
, 1)) >= 0
2302 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2304 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2305 GET_MODE_PRECISION (int_mode
));
2306 lhs
= XEXP (lhs
, 0);
2309 if (GET_CODE (rhs
) == NEG
)
2311 coeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2312 rhs
= XEXP (rhs
, 0);
2314 else if (GET_CODE (rhs
) == MULT
2315 && CONST_INT_P (XEXP (rhs
, 1)))
2317 coeff1
= rtx_mode_t (XEXP (rhs
, 1), int_mode
);
2318 rhs
= XEXP (rhs
, 0);
2320 else if (GET_CODE (rhs
) == ASHIFT
2321 && CONST_INT_P (XEXP (rhs
, 1))
2322 && INTVAL (XEXP (rhs
, 1)) >= 0
2323 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2325 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2326 GET_MODE_PRECISION (int_mode
));
2327 rhs
= XEXP (rhs
, 0);
2330 if (rtx_equal_p (lhs
, rhs
))
2332 rtx orig
= gen_rtx_PLUS (int_mode
, op0
, op1
);
2334 bool speed
= optimize_function_for_speed_p (cfun
);
2336 coeff
= immed_wide_int_const (coeff0
+ coeff1
, int_mode
);
2338 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2339 return (set_src_cost (tem
, int_mode
, speed
)
2340 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
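      /* Illustrative example (not part of the original source): for
	 (plus (ashift x (const_int 2)) x) the code above sees
	 coefficients 4 and 1 on the same operand X, so it forms
	 (mult x (const_int 5)) and returns it when that is no more
	 expensive than the original.  */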
2344 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2345 if (CONST_SCALAR_INT_P (op1
)
2346 && GET_CODE (op0
) == XOR
2347 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2348 && mode_signbit_p (mode
, op1
))
2349 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2350 simplify_gen_binary (XOR
, mode
, op1
,
2353 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2354 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2355 && GET_CODE (op0
) == MULT
2356 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2360 in1
= XEXP (XEXP (op0
, 0), 0);
2361 in2
= XEXP (op0
, 1);
2362 return simplify_gen_binary (MINUS
, mode
, op1
,
2363 simplify_gen_binary (MULT
, mode
,
2367 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2368 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2370 if (COMPARISON_P (op0
)
2371 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2372 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2373 && (reversed
= reversed_comparison (op0
, mode
)))
2375 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2377 /* If one of the operands is a PLUS or a MINUS, see if we can
2378 simplify this by the associative law.
2379 Don't use the associative law for floating point.
2380 The inaccuracy makes it nonassociative,
2381 and subtle programs can break if operations are associated. */
2383 if (INTEGRAL_MODE_P (mode
)
2384 && (plus_minus_operand_p (op0
)
2385 || plus_minus_operand_p (op1
))
2386 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2389 /* Reassociate floating point addition only when the user
2390 specifies associative math operations. */
2391 if (FLOAT_MODE_P (mode
)
2392 && flag_associative_math
)
2394 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2399 /* Handle vector series. */
2400 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2402 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2409 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2410 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2411 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2412 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2414 rtx xop00
= XEXP (op0
, 0);
2415 rtx xop10
= XEXP (op1
, 0);
2417 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2420 if (REG_P (xop00
) && REG_P (xop10
)
2421 && REGNO (xop00
) == REGNO (xop10
)
2422 && GET_MODE (xop00
) == mode
2423 && GET_MODE (xop10
) == mode
2424 && GET_MODE_CLASS (mode
) == MODE_CC
)
2430 /* We can't assume x-x is 0 even with non-IEEE floating point,
2431 but since it is zero except in very strange circumstances, we
2432 will treat it as zero with -ffinite-math-only. */
2433 if (rtx_equal_p (trueop0
, trueop1
)
2434 && ! side_effects_p (op0
)
2435 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2436 return CONST0_RTX (mode
);
2438 /* Change subtraction from zero into negation. (0 - x) is the
2439 same as -x when x is NaN, infinite, or finite and nonzero.
2440 But if the mode has signed zeros, and does not round towards
2441 -infinity, then 0 - 0 is 0, not -0. */
2442 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2443 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2445 /* (-1 - a) is ~a, unless the expression contains symbolic
2446 constants, in which case not retaining additions and
2447 subtractions could cause invalid assembly to be produced. */
2448 if (trueop0
== constm1_rtx
2449 && !contains_symbolic_reference_p (op1
))
2450 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2452 /* Subtracting 0 has no effect unless the mode has signed zeros
2453 and supports rounding towards -infinity. In such a case,
2455 if (!(HONOR_SIGNED_ZEROS (mode
)
2456 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2457 && trueop1
== CONST0_RTX (mode
))
2460 /* See if this is something like X * C - X or vice versa or
2461 if the multiplication is written as a shift. If so, we can
2462 distribute and make a new multiply, shift, or maybe just
2463 have X (if C is 2 in the example above). But don't make
2464 something more expensive than we had before. */
2466 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2468 rtx lhs
= op0
, rhs
= op1
;
2470 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2471 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2473 if (GET_CODE (lhs
) == NEG
)
2475 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2476 lhs
= XEXP (lhs
, 0);
2478 else if (GET_CODE (lhs
) == MULT
2479 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2481 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2482 lhs
= XEXP (lhs
, 0);
2484 else if (GET_CODE (lhs
) == ASHIFT
2485 && CONST_INT_P (XEXP (lhs
, 1))
2486 && INTVAL (XEXP (lhs
, 1)) >= 0
2487 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2489 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2490 GET_MODE_PRECISION (int_mode
));
2491 lhs
= XEXP (lhs
, 0);
2494 if (GET_CODE (rhs
) == NEG
)
2496 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2497 rhs
= XEXP (rhs
, 0);
2499 else if (GET_CODE (rhs
) == MULT
2500 && CONST_INT_P (XEXP (rhs
, 1)))
2502 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2503 rhs
= XEXP (rhs
, 0);
2505 else if (GET_CODE (rhs
) == ASHIFT
2506 && CONST_INT_P (XEXP (rhs
, 1))
2507 && INTVAL (XEXP (rhs
, 1)) >= 0
2508 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2510 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2511 GET_MODE_PRECISION (int_mode
));
2512 negcoeff1
= -negcoeff1
;
2513 rhs
= XEXP (rhs
, 0);
2516 if (rtx_equal_p (lhs
, rhs
))
2518 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2520 bool speed
= optimize_function_for_speed_p (cfun
);
2522 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2524 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2525 return (set_src_cost (tem
, int_mode
, speed
)
2526 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2534 /* (-x - c) may be simplified as (-c - x). */
2535 if (GET_CODE (op0
) == NEG
2536 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2538 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2540 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2543 if ((GET_CODE (op0
) == CONST
2544 || GET_CODE (op0
) == SYMBOL_REF
2545 || GET_CODE (op0
) == LABEL_REF
)
2546 && poly_int_rtx_p (op1
, &offset
))
2547 return plus_constant (mode
, op0
, trunc_int_for_mode (-offset
, mode
));
2549 /* Don't let a relocatable value get a negative coeff. */
2550 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2551 return simplify_gen_binary (PLUS
, mode
,
2553 neg_const_int (mode
, op1
));
      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
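      /* Illustrative example (not part of the original source): with
	 x = 0b1100 and y = 0b1010, x & y = 0b1000 and
	 x - (x & y) = 0b0100, which equals x & ~y; the subtraction never
	 borrows because x & y is a submask of x.  */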
2572 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2573 by reversing the comparison code if valid. */
2574 if (STORE_FLAG_VALUE
== 1
2575 && trueop0
== const1_rtx
2576 && COMPARISON_P (op1
)
2577 && (reversed
= reversed_comparison (op1
, mode
)))
2580 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2581 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2582 && GET_CODE (op1
) == MULT
2583 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2587 in1
= XEXP (XEXP (op1
, 0), 0);
2588 in2
= XEXP (op1
, 1);
2589 return simplify_gen_binary (PLUS
, mode
,
2590 simplify_gen_binary (MULT
, mode
,
2595 /* Canonicalize (minus (neg A) (mult B C)) to
2596 (minus (mult (neg B) C) A). */
2597 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2598 && GET_CODE (op1
) == MULT
2599 && GET_CODE (op0
) == NEG
)
2603 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2604 in2
= XEXP (op1
, 1);
2605 return simplify_gen_binary (MINUS
, mode
,
2606 simplify_gen_binary (MULT
, mode
,
2611 /* If one of the operands is a PLUS or a MINUS, see if we can
2612 simplify this by the associative law. This will, for example,
2613 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2614 Don't use the associative law for floating point.
2615 The inaccuracy makes it nonassociative,
2616 and subtle programs can break if operations are associated. */
2618 if (INTEGRAL_MODE_P (mode
)
2619 && (plus_minus_operand_p (op0
)
2620 || plus_minus_operand_p (op1
))
2621 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2624 /* Handle vector series. */
2625 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2627 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2634 if (trueop1
== constm1_rtx
)
2635 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2637 if (GET_CODE (op0
) == NEG
)
2639 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2640 /* If op1 is a MULT as well and simplify_unary_operation
2641 just moved the NEG to the second operand, simplify_gen_binary
2642 below could through simplify_associative_operation move
2643 the NEG around again and recurse endlessly. */
2645 && GET_CODE (op1
) == MULT
2646 && GET_CODE (temp
) == MULT
2647 && XEXP (op1
, 0) == XEXP (temp
, 0)
2648 && GET_CODE (XEXP (temp
, 1)) == NEG
2649 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2652 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2654 if (GET_CODE (op1
) == NEG
)
2656 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2657 /* If op0 is a MULT as well and simplify_unary_operation
2658 just moved the NEG to the second operand, simplify_gen_binary
2659 below could through simplify_associative_operation move
2660 the NEG around again and recurse endlessly. */
2662 && GET_CODE (op0
) == MULT
2663 && GET_CODE (temp
) == MULT
2664 && XEXP (op0
, 0) == XEXP (temp
, 0)
2665 && GET_CODE (XEXP (temp
, 1)) == NEG
2666 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2669 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2672 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2673 x is NaN, since x * 0 is then also NaN. Nor is it valid
2674 when the mode has signed zeros, since multiplying a negative
2675 number by 0 will give -0, not 0. */
2676 if (!HONOR_NANS (mode
)
2677 && !HONOR_SIGNED_ZEROS (mode
)
2678 && trueop1
== CONST0_RTX (mode
)
2679 && ! side_effects_p (op0
))
2682 /* In IEEE floating point, x*1 is not equivalent to x for
2684 if (!HONOR_SNANS (mode
)
2685 && trueop1
== CONST1_RTX (mode
))
      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0,
					gen_int_shift_amount (mode, val));
	}
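      /* Illustrative example (not part of the original source):
	 (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3)),
	 since the exact base-2 logarithm of 8 is 3.  */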
2697 /* x*2 is x+x and x*(-1) is -x */
2698 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2699 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2700 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2701 && GET_MODE (op0
) == mode
)
2703 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
2705 if (real_equal (d1
, &dconst2
))
2706 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2708 if (!HONOR_SNANS (mode
)
2709 && real_equal (d1
, &dconstm1
))
2710 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2713 /* Optimize -x * -x as x * x. */
2714 if (FLOAT_MODE_P (mode
)
2715 && GET_CODE (op0
) == NEG
2716 && GET_CODE (op1
) == NEG
2717 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2718 && !side_effects_p (XEXP (op0
, 0)))
2719 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2721 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2722 if (SCALAR_FLOAT_MODE_P (mode
)
2723 && GET_CODE (op0
) == ABS
2724 && GET_CODE (op1
) == ABS
2725 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2726 && !side_effects_p (XEXP (op0
, 0)))
2727 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2729 /* Reassociate multiplication, but for floating point MULTs
2730 only when the user specifies unsafe math optimizations. */
2731 if (! FLOAT_MODE_P (mode
)
2732 || flag_unsafe_math_optimizations
)
2734 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2741 if (trueop1
== CONST0_RTX (mode
))
2743 if (INTEGRAL_MODE_P (mode
)
2744 && trueop1
== CONSTM1_RTX (mode
)
2745 && !side_effects_p (op0
))
2747 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2749 /* A | (~A) -> -1 */
2750 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2751 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2752 && ! side_effects_p (op0
)
2753 && SCALAR_INT_MODE_P (mode
))
2756 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2757 if (CONST_INT_P (op1
)
2758 && HWI_COMPUTABLE_MODE_P (mode
)
2759 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2760 && !side_effects_p (op0
))
2763 /* Canonicalize (X & C1) | C2. */
2764 if (GET_CODE (op0
) == AND
2765 && CONST_INT_P (trueop1
)
2766 && CONST_INT_P (XEXP (op0
, 1)))
2768 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2769 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2770 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2772 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2774 && !side_effects_p (XEXP (op0
, 0)))
2777 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2778 if (((c1
|c2
) & mask
) == mask
)
2779 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2782 /* Convert (A & B) | A to A. */
2783 if (GET_CODE (op0
) == AND
2784 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2785 || rtx_equal_p (XEXP (op0
, 1), op1
))
2786 && ! side_effects_p (XEXP (op0
, 0))
2787 && ! side_effects_p (XEXP (op0
, 1)))
2790 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2791 mode size to (rotate A CX). */
2793 if (GET_CODE (op1
) == ASHIFT
2794 || GET_CODE (op1
) == SUBREG
)
2805 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2806 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2807 && CONST_INT_P (XEXP (opleft
, 1))
2808 && CONST_INT_P (XEXP (opright
, 1))
2809 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2810 == GET_MODE_UNIT_PRECISION (mode
)))
2811 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
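      /* Illustrative example (not part of the original source): in
	 SImode, (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
	 has shift counts summing to the 32-bit precision, so it becomes
	 (rotate x (const_int 24)).  */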
2813 /* Same, but for ashift that has been "simplified" to a wider mode
2814 by simplify_shift_const. */
2816 if (GET_CODE (opleft
) == SUBREG
2817 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
2818 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (opleft
)),
2820 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2821 && GET_CODE (opright
) == LSHIFTRT
2822 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2823 && known_eq (SUBREG_BYTE (opleft
), SUBREG_BYTE (XEXP (opright
, 0)))
2824 && GET_MODE_SIZE (int_mode
) < GET_MODE_SIZE (inner_mode
)
2825 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2826 SUBREG_REG (XEXP (opright
, 0)))
2827 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2828 && CONST_INT_P (XEXP (opright
, 1))
2829 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1))
2830 + INTVAL (XEXP (opright
, 1))
2831 == GET_MODE_PRECISION (int_mode
)))
2832 return gen_rtx_ROTATE (int_mode
, XEXP (opright
, 0),
2833 XEXP (SUBREG_REG (opleft
), 1));
2835 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2836 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2837 the PLUS does not affect any of the bits in OP1: then we can do
2838 the IOR as a PLUS and we can associate. This is valid if OP1
2839 can be safely shifted left C bits. */
2840 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2841 && GET_CODE (XEXP (op0
, 0)) == PLUS
2842 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2843 && CONST_INT_P (XEXP (op0
, 1))
2844 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2846 int count
= INTVAL (XEXP (op0
, 1));
2847 HOST_WIDE_INT mask
= UINTVAL (trueop1
) << count
;
2849 if (mask
>> count
== INTVAL (trueop1
)
2850 && trunc_int_for_mode (mask
, mode
) == mask
2851 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2852 return simplify_gen_binary (ASHIFTRT
, mode
,
2853 plus_constant (mode
, XEXP (op0
, 0),
2858 /* The following happens with bitfield merging.
2859 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
2860 if (GET_CODE (op0
) == AND
2861 && GET_CODE (op1
) == AND
2862 && CONST_INT_P (XEXP (op0
, 1))
2863 && CONST_INT_P (XEXP (op1
, 1))
2864 && (INTVAL (XEXP (op0
, 1))
2865 == ~INTVAL (XEXP (op1
, 1))))
2867 /* The IOR may be on both sides. */
2868 rtx top0
= NULL_RTX
, top1
= NULL_RTX
;
2869 if (GET_CODE (XEXP (op1
, 0)) == IOR
)
2870 top0
= op0
, top1
= op1
;
2871 else if (GET_CODE (XEXP (op0
, 0)) == IOR
)
2872 top0
= op1
, top1
= op0
;
2875 /* X may be on either side of the inner IOR. */
2877 if (rtx_equal_p (XEXP (top0
, 0),
2878 XEXP (XEXP (top1
, 0), 0)))
2879 tem
= XEXP (XEXP (top1
, 0), 1);
2880 else if (rtx_equal_p (XEXP (top0
, 0),
2881 XEXP (XEXP (top1
, 0), 1)))
2882 tem
= XEXP (XEXP (top1
, 0), 0);
2884 return simplify_gen_binary (IOR
, mode
, XEXP (top0
, 0),
2886 (AND
, mode
, tem
, XEXP (top1
, 1)));
2890 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2894 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2900 if (trueop1
== CONST0_RTX (mode
))
2902 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2903 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2904 if (rtx_equal_p (trueop0
, trueop1
)
2905 && ! side_effects_p (op0
)
2906 && GET_MODE_CLASS (mode
) != MODE_CC
)
2907 return CONST0_RTX (mode
);
      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
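      /* Illustrative example (not part of the original source): in
	 SImode, (xor x (const_int 0x80000000)) and
	 (plus x (const_int 0x80000000)) agree for every X, because
	 adding the sign bit flips only that bit and any carry out of
	 the top bit is discarded.  */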
2913 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2914 if (CONST_SCALAR_INT_P (op1
)
2915 && GET_CODE (op0
) == PLUS
2916 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2917 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2918 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2919 simplify_gen_binary (XOR
, mode
, op1
,
2922 /* If we are XORing two things that have no bits in common,
2923 convert them into an IOR. This helps to detect rotation encoded
2924 using those methods and possibly other simplifications. */
2926 if (HWI_COMPUTABLE_MODE_P (mode
)
2927 && (nonzero_bits (op0
, mode
)
2928 & nonzero_bits (op1
, mode
)) == 0)
2929 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2931 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2932 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2935 int num_negated
= 0;
2937 if (GET_CODE (op0
) == NOT
)
2938 num_negated
++, op0
= XEXP (op0
, 0);
2939 if (GET_CODE (op1
) == NOT
)
2940 num_negated
++, op1
= XEXP (op1
, 0);
2942 if (num_negated
== 2)
2943 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2944 else if (num_negated
== 1)
2945 return simplify_gen_unary (NOT
, mode
,
2946 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2950 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2951 correspond to a machine insn or result in further simplifications
2952 if B is a constant. */
2954 if (GET_CODE (op0
) == AND
2955 && rtx_equal_p (XEXP (op0
, 1), op1
)
2956 && ! side_effects_p (op1
))
2957 return simplify_gen_binary (AND
, mode
,
2958 simplify_gen_unary (NOT
, mode
,
2959 XEXP (op0
, 0), mode
),
2962 else if (GET_CODE (op0
) == AND
2963 && rtx_equal_p (XEXP (op0
, 0), op1
)
2964 && ! side_effects_p (op1
))
2965 return simplify_gen_binary (AND
, mode
,
2966 simplify_gen_unary (NOT
, mode
,
2967 XEXP (op0
, 1), mode
),
2970 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2971 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2972 out bits inverted twice and not set by C. Similarly, given
2973 (xor (and (xor A B) C) D), simplify without inverting C in
2974 the xor operand: (xor (and A C) (B&C)^D).
2976 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2977 && GET_CODE (XEXP (op0
, 0)) == XOR
2978 && CONST_INT_P (op1
)
2979 && CONST_INT_P (XEXP (op0
, 1))
2980 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2982 enum rtx_code op
= GET_CODE (op0
);
2983 rtx a
= XEXP (XEXP (op0
, 0), 0);
2984 rtx b
= XEXP (XEXP (op0
, 0), 1);
2985 rtx c
= XEXP (op0
, 1);
2987 HOST_WIDE_INT bval
= INTVAL (b
);
2988 HOST_WIDE_INT cval
= INTVAL (c
);
2989 HOST_WIDE_INT dval
= INTVAL (d
);
2990 HOST_WIDE_INT xcval
;
2997 return simplify_gen_binary (XOR
, mode
,
2998 simplify_gen_binary (op
, mode
, a
, c
),
2999 gen_int_mode ((bval
& xcval
) ^ dval
,
3003 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3004 we can transform like this:
3005 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3006 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3007 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3008 Attempt a few simplifications when B and C are both constants. */
3009 if (GET_CODE (op0
) == AND
3010 && CONST_INT_P (op1
)
3011 && CONST_INT_P (XEXP (op0
, 1)))
3013 rtx a
= XEXP (op0
, 0);
3014 rtx b
= XEXP (op0
, 1);
3016 HOST_WIDE_INT bval
= INTVAL (b
);
3017 HOST_WIDE_INT cval
= INTVAL (c
);
3019 /* Instead of computing ~A&C, we compute its negated value,
3020 ~(A|~C). If it yields -1, ~A&C is zero, so we can
3021 optimize for sure. If it does not simplify, we still try
3022 to compute ~A&C below, but since that always allocates
3023 RTL, we don't try that before committing to returning a
3024 simplified expression. */
3025 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
3028 if ((~cval
& bval
) == 0)
3030 rtx na_c
= NULL_RTX
;
3032 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
3035 /* If ~A does not simplify, don't bother: we don't
3036 want to simplify 2 operations into 3, and if na_c
3037 were to simplify with na, n_na_c would have
3038 simplified as well. */
3039 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
3041 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
3044 /* Try to simplify ~A&C | ~B&C. */
3045 if (na_c
!= NULL_RTX
)
3046 return simplify_gen_binary (IOR
, mode
, na_c
,
3047 gen_int_mode (~bval
& cval
, mode
));
3051 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3052 if (n_na_c
== CONSTM1_RTX (mode
))
3054 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
3055 gen_int_mode (~cval
& bval
,
3057 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
3058 gen_int_mode (~bval
& cval
,
3064 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3065 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3066 machines, and also has shorter instruction path length. */
3067 if (GET_CODE (op0
) == AND
3068 && GET_CODE (XEXP (op0
, 0)) == XOR
3069 && CONST_INT_P (XEXP (op0
, 1))
3070 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), trueop1
))
3073 rtx b
= XEXP (XEXP (op0
, 0), 1);
3074 rtx c
= XEXP (op0
, 1);
3075 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3076 rtx a_nc
= simplify_gen_binary (AND
, mode
, a
, nc
);
3077 rtx bc
= simplify_gen_binary (AND
, mode
, b
, c
);
3078 return simplify_gen_binary (IOR
, mode
, a_nc
, bc
);
3080 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3081 else if (GET_CODE (op0
) == AND
3082 && GET_CODE (XEXP (op0
, 0)) == XOR
3083 && CONST_INT_P (XEXP (op0
, 1))
3084 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), trueop1
))
3086 rtx a
= XEXP (XEXP (op0
, 0), 0);
3088 rtx c
= XEXP (op0
, 1);
3089 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3090 rtx b_nc
= simplify_gen_binary (AND
, mode
, b
, nc
);
3091 rtx ac
= simplify_gen_binary (AND
, mode
, a
, c
);
3092 return simplify_gen_binary (IOR
, mode
, ac
, b_nc
);
3095 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3096 comparison if STORE_FLAG_VALUE is 1. */
3097 if (STORE_FLAG_VALUE
== 1
3098 && trueop1
== const1_rtx
3099 && COMPARISON_P (op0
)
3100 && (reversed
= reversed_comparison (op0
, mode
)))
3103 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3104 is (lt foo (const_int 0)), so we can perform the above
3105 simplification if STORE_FLAG_VALUE is 1. */
3107 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3108 && STORE_FLAG_VALUE
== 1
3109 && trueop1
== const1_rtx
3110 && GET_CODE (op0
) == LSHIFTRT
3111 && CONST_INT_P (XEXP (op0
, 1))
3112 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
3113 return gen_rtx_GE (int_mode
, XEXP (op0
, 0), const0_rtx
);
3115 /* (xor (comparison foo bar) (const_int sign-bit))
3116 when STORE_FLAG_VALUE is the sign bit. */
3117 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3118 && val_signbit_p (int_mode
, STORE_FLAG_VALUE
)
3119 && trueop1
== const_true_rtx
3120 && COMPARISON_P (op0
)
3121 && (reversed
= reversed_comparison (op0
, int_mode
)))
3124 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3128 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3134 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3136 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3138 if (HWI_COMPUTABLE_MODE_P (mode
))
3140 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3141 HOST_WIDE_INT nzop1
;
3142 if (CONST_INT_P (trueop1
))
3144 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3145 /* If we are turning off bits already known off in OP0, we need
3147 if ((nzop0
& ~val1
) == 0)
3150 nzop1
= nonzero_bits (trueop1
, mode
);
3151 /* If we are clearing all the nonzero bits, the result is zero. */
3152 if ((nzop1
& nzop0
) == 0
3153 && !side_effects_p (op0
) && !side_effects_p (op1
))
3154 return CONST0_RTX (mode
);
3156 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3157 && GET_MODE_CLASS (mode
) != MODE_CC
)
3160 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3161 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3162 && ! side_effects_p (op0
)
3163 && GET_MODE_CLASS (mode
) != MODE_CC
)
3164 return CONST0_RTX (mode
);
3166 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3167 there are no nonzero bits of C outside of X's mode. */
3168 if ((GET_CODE (op0
) == SIGN_EXTEND
3169 || GET_CODE (op0
) == ZERO_EXTEND
)
3170 && CONST_INT_P (trueop1
)
3171 && HWI_COMPUTABLE_MODE_P (mode
)
3172 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3173 & UINTVAL (trueop1
)) == 0)
3175 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3176 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3177 gen_int_mode (INTVAL (trueop1
),
3179 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3182 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3183 we might be able to further simplify the AND with X and potentially
3184 remove the truncation altogether. */
3185 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3187 rtx x
= XEXP (op0
, 0);
3188 machine_mode xmode
= GET_MODE (x
);
3189 tem
= simplify_gen_binary (AND
, xmode
, x
,
3190 gen_int_mode (INTVAL (trueop1
), xmode
));
3191 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3194 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3195 if (GET_CODE (op0
) == IOR
3196 && CONST_INT_P (trueop1
)
3197 && CONST_INT_P (XEXP (op0
, 1)))
3199 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3200 return simplify_gen_binary (IOR
, mode
,
3201 simplify_gen_binary (AND
, mode
,
3202 XEXP (op0
, 0), op1
),
3203 gen_int_mode (tmp
, mode
));
3206 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3207 insn (and may simplify more). */
3208 if (GET_CODE (op0
) == XOR
3209 && rtx_equal_p (XEXP (op0
, 0), op1
)
3210 && ! side_effects_p (op1
))
3211 return simplify_gen_binary (AND
, mode
,
3212 simplify_gen_unary (NOT
, mode
,
3213 XEXP (op0
, 1), mode
),
3216 if (GET_CODE (op0
) == XOR
3217 && rtx_equal_p (XEXP (op0
, 1), op1
)
3218 && ! side_effects_p (op1
))
3219 return simplify_gen_binary (AND
, mode
,
3220 simplify_gen_unary (NOT
, mode
,
3221 XEXP (op0
, 0), mode
),
3224 /* Similarly for (~(A ^ B)) & A. */
3225 if (GET_CODE (op0
) == NOT
3226 && GET_CODE (XEXP (op0
, 0)) == XOR
3227 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3228 && ! side_effects_p (op1
))
3229 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3231 if (GET_CODE (op0
) == NOT
3232 && GET_CODE (XEXP (op0
, 0)) == XOR
3233 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3234 && ! side_effects_p (op1
))
3235 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3237 /* Convert (A | B) & A to A. */
3238 if (GET_CODE (op0
) == IOR
3239 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3240 || rtx_equal_p (XEXP (op0
, 1), op1
))
3241 && ! side_effects_p (XEXP (op0
, 0))
3242 && ! side_effects_p (XEXP (op0
, 1)))
3245 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3246 ((A & N) + B) & M -> (A + B) & M
3247 Similarly if (N & M) == 0,
3248 ((A | N) + B) & M -> (A + B) & M
3249 and for - instead of + and/or ^ instead of |.
3250 Also, if (N & M) == 0, then
3251 (A +- N) & M -> A & M. */
3252 if (CONST_INT_P (trueop1
)
3253 && HWI_COMPUTABLE_MODE_P (mode
)
3254 && ~UINTVAL (trueop1
)
3255 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3256 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3261 pmop
[0] = XEXP (op0
, 0);
3262 pmop
[1] = XEXP (op0
, 1);
3264 if (CONST_INT_P (pmop
[1])
3265 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3266 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3268 for (which
= 0; which
< 2; which
++)
3271 switch (GET_CODE (tem
))
3274 if (CONST_INT_P (XEXP (tem
, 1))
3275 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3276 == UINTVAL (trueop1
))
3277 pmop
[which
] = XEXP (tem
, 0);
3281 if (CONST_INT_P (XEXP (tem
, 1))
3282 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3283 pmop
[which
] = XEXP (tem
, 0);
3290 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3292 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3294 return simplify_gen_binary (code
, mode
, tem
, op1
);
3298 /* (and X (ior (not X) Y) -> (and X Y) */
3299 if (GET_CODE (op1
) == IOR
3300 && GET_CODE (XEXP (op1
, 0)) == NOT
3301 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3302 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3304 /* (and (ior (not X) Y) X) -> (and X Y) */
3305 if (GET_CODE (op0
) == IOR
3306 && GET_CODE (XEXP (op0
, 0)) == NOT
3307 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3308 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3310 /* (and X (ior Y (not X)) -> (and X Y) */
3311 if (GET_CODE (op1
) == IOR
3312 && GET_CODE (XEXP (op1
, 1)) == NOT
3313 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3314 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3316 /* (and (ior Y (not X)) X) -> (and X Y) */
3317 if (GET_CODE (op0
) == IOR
3318 && GET_CODE (XEXP (op0
, 1)) == NOT
3319 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3320 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3322 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3326 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3332 /* 0/x is 0 (or x&0 if x has side-effects). */
3333 if (trueop0
== CONST0_RTX (mode
)
3334 && !cfun
->can_throw_non_call_exceptions
)
3336 if (side_effects_p (op1
))
3337 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3341 if (trueop1
== CONST1_RTX (mode
))
3343 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0,
				    gen_int_shift_amount (mode, val));
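      /* Illustrative example (not part of the original source):
	 (udiv:SI x (const_int 16)) becomes
	 (lshiftrt:SI x (const_int 4)).  */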
3355 /* Handle floating point and integers separately. */
3356 if (SCALAR_FLOAT_MODE_P (mode
))
3358 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3359 safe for modes with NaNs, since 0.0 / 0.0 will then be
3360 NaN rather than 0.0. Nor is it safe for modes with signed
3361 zeros, since dividing 0 by a negative number gives -0.0 */
3362 if (trueop0
== CONST0_RTX (mode
)
3363 && !HONOR_NANS (mode
)
3364 && !HONOR_SIGNED_ZEROS (mode
)
3365 && ! side_effects_p (op1
))
3368 if (trueop1
== CONST1_RTX (mode
)
3369 && !HONOR_SNANS (mode
))
3372 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3373 && trueop1
!= CONST0_RTX (mode
))
3375 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3378 if (real_equal (d1
, &dconstm1
)
3379 && !HONOR_SNANS (mode
))
3380 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3382 /* Change FP division by a constant into multiplication.
3383 Only do this with -freciprocal-math. */
3384 if (flag_reciprocal_math
3385 && !real_equal (d1
, &dconst0
))
3388 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3389 tem
= const_double_from_real_value (d
, mode
);
3390 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3394 else if (SCALAR_INT_MODE_P (mode
))
3396 /* 0/x is 0 (or x&0 if x has side-effects). */
3397 if (trueop0
== CONST0_RTX (mode
)
3398 && !cfun
->can_throw_non_call_exceptions
)
3400 if (side_effects_p (op1
))
3401 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3405 if (trueop1
== CONST1_RTX (mode
))
3407 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3412 if (trueop1
== constm1_rtx
)
3414 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3416 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3422 /* 0%x is 0 (or x&0 if x has side-effects). */
3423 if (trueop0
== CONST0_RTX (mode
))
3425 if (side_effects_p (op1
))
3426 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3429 /* x%1 is 0 (of x&0 if x has side-effects). */
3430 if (trueop1
== CONST1_RTX (mode
))
3432 if (side_effects_p (op0
))
3433 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3434 return CONST0_RTX (mode
);
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (UINTVAL (trueop1) - 1,
						  mode));
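      /* Illustrative example (not part of the original source): for
	 unsigned modulus, (umod:SI x (const_int 16)) becomes
	 (and:SI x (const_int 15)).  */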
3445 /* 0%x is 0 (or x&0 if x has side-effects). */
3446 if (trueop0
== CONST0_RTX (mode
))
3448 if (side_effects_p (op1
))
3449 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3452 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3453 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3455 if (side_effects_p (op0
))
3456 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3457 return CONST0_RTX (mode
);
3463 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3464 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3465 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3467 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3468 if (CONST_INT_P (trueop1
)
3469 && IN_RANGE (INTVAL (trueop1
),
3470 GET_MODE_UNIT_PRECISION (mode
) / 2 + (code
== ROTATE
),
3471 GET_MODE_UNIT_PRECISION (mode
) - 1))
3473 int new_amount
= GET_MODE_UNIT_PRECISION (mode
) - INTVAL (trueop1
);
3474 rtx new_amount_rtx
= gen_int_shift_amount (mode
, new_amount
);
3475 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3476 mode
, op0
, new_amount_rtx
);
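      /* Illustrative example (not part of the original source): on a
	 target with both rotate directions, (rotate:SI x (const_int 31))
	 is canonicalized to (rotatert:SI x (const_int 1)).  */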
3481 if (trueop1
== CONST0_RTX (mode
))
3483 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3485 /* Rotating ~0 always results in ~0. */
3486 if (CONST_INT_P (trueop0
)
3487 && HWI_COMPUTABLE_MODE_P (mode
)
3488 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3489 && ! side_effects_p (op1
))
3495 scalar constants c1, c2
3496 size (M2) > size (M1)
3497 c1 == size (M2) - size (M1)
3499 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3503 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3505 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
3506 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3508 && CONST_INT_P (op1
)
3509 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3510 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
3512 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3513 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
3514 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3515 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
3516 && subreg_lowpart_p (op0
))
3518 rtx tmp
= gen_int_shift_amount
3519 (inner_mode
, INTVAL (XEXP (SUBREG_REG (op0
), 1)) + INTVAL (op1
));
3520 tmp
= simplify_gen_binary (code
, inner_mode
,
3521 XEXP (SUBREG_REG (op0
), 0),
3523 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
3526 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3528 val
= INTVAL (op1
) & (GET_MODE_UNIT_PRECISION (mode
) - 1);
3529 if (val
!= INTVAL (op1
))
3530 return simplify_gen_binary (code
, mode
, op0
,
3531 gen_int_shift_amount (mode
, val
));
3538 if (trueop1
== CONST0_RTX (mode
))
3540 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3542 goto canonicalize_shift
;
3545 if (trueop1
== CONST0_RTX (mode
))
3547 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3549 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3550 if (GET_CODE (op0
) == CLZ
3551 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op0
, 0)), &inner_mode
)
3552 && CONST_INT_P (trueop1
)
3553 && STORE_FLAG_VALUE
== 1
3554 && INTVAL (trueop1
) < GET_MODE_UNIT_PRECISION (mode
))
3556 unsigned HOST_WIDE_INT zero_val
= 0;
3558 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode
, zero_val
)
3559 && zero_val
== GET_MODE_PRECISION (inner_mode
)
3560 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3561 return simplify_gen_relational (EQ
, mode
, inner_mode
,
3562 XEXP (op0
, 0), const0_rtx
);
3564 goto canonicalize_shift
;
3567 if (HWI_COMPUTABLE_MODE_P (mode
)
3568 && mode_signbit_p (mode
, trueop1
)
3569 && ! side_effects_p (op0
))
3571 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3573 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3579 if (HWI_COMPUTABLE_MODE_P (mode
)
3580 && CONST_INT_P (trueop1
)
3581 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3582 && ! side_effects_p (op0
))
3584 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3586 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3592 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3594 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3596 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3602 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3604 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3606 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3619 /* ??? There are simplifications that can be done. */
3623 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
3624 return gen_vec_duplicate (mode
, op0
);
3625 if (valid_for_const_vector_p (mode
, op0
)
3626 && valid_for_const_vector_p (mode
, op1
))
3627 return gen_const_vec_series (mode
, op0
, op1
);
3631 if (!VECTOR_MODE_P (mode
))
3633 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3634 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3635 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3636 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3638 /* We can't reason about selections made at runtime. */
3639 if (!CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3642 if (vec_duplicate_p (trueop0
, &elt0
))
3645 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3646 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3649 /* Extract a scalar element from a nested VEC_SELECT expression
3650 (with optional nested VEC_CONCAT expression). Some targets
3651 (i386) extract scalar element from a vector using chain of
3652 nested VEC_SELECT expressions. When input operand is a memory
3653 operand, this operation can be simplified to a simple scalar
3654 load from an offseted memory address. */
3656 if (GET_CODE (trueop0
) == VEC_SELECT
3657 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3658 .is_constant (&n_elts
)))
3660 rtx op0
= XEXP (trueop0
, 0);
3661 rtx op1
= XEXP (trueop0
, 1);
3663 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3669 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3670 gcc_assert (i
< n_elts
);
3672 /* Select element, pointed by nested selector. */
3673 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3675 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3676 if (GET_CODE (op0
) == VEC_CONCAT
)
3678 rtx op00
= XEXP (op0
, 0);
3679 rtx op01
= XEXP (op0
, 1);
3681 machine_mode mode00
, mode01
;
3682 int n_elts00
, n_elts01
;
3684 mode00
= GET_MODE (op00
);
3685 mode01
= GET_MODE (op01
);
3687 /* Find out the number of elements of each operand.
3688 Since the concatenated result has a constant number
3689 of elements, the operands must too. */
3690 n_elts00
= GET_MODE_NUNITS (mode00
).to_constant ();
3691 n_elts01
= GET_MODE_NUNITS (mode01
).to_constant ();
3693 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3695 /* Select correct operand of VEC_CONCAT
3696 and adjust selector. */
3697 if (elem
< n_elts01
)
3708 vec
= rtvec_alloc (1);
3709 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3711 tmp
= gen_rtx_fmt_ee (code
, mode
,
3712 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3718 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3719 gcc_assert (GET_MODE_INNER (mode
)
3720 == GET_MODE_INNER (GET_MODE (trueop0
)));
3721 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3723 if (vec_duplicate_p (trueop0
, &elt0
))
3724 /* It doesn't matter which elements are selected by trueop1,
3725 because they are all the same. */
3726 return gen_vec_duplicate (mode
, elt0
);
3728 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3730 unsigned n_elts
= XVECLEN (trueop1
, 0);
3731 rtvec v
= rtvec_alloc (n_elts
);
3734 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
3735 for (i
= 0; i
< n_elts
; i
++)
3737 rtx x
= XVECEXP (trueop1
, 0, i
);
3739 if (!CONST_INT_P (x
))
3742 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3746 return gen_rtx_CONST_VECTOR (mode
, v
);
3749 /* Recognize the identity. */
3750 if (GET_MODE (trueop0
) == mode
)
3752 bool maybe_ident
= true;
3753 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3755 rtx j
= XVECEXP (trueop1
, 0, i
);
3756 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3758 maybe_ident
= false;
3766 /* If we build {a,b} then permute it, build the result directly. */
3767 if (XVECLEN (trueop1
, 0) == 2
3768 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3769 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3770 && GET_CODE (trueop0
) == VEC_CONCAT
3771 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3772 && GET_MODE (XEXP (trueop0
, 0)) == mode
3773 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3774 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3776 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3777 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3780 gcc_assert (i0
< 4 && i1
< 4);
3781 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3782 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3784 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3787 if (XVECLEN (trueop1
, 0) == 2
3788 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3789 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3790 && GET_CODE (trueop0
) == VEC_CONCAT
3791 && GET_MODE (trueop0
) == mode
)
3793 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3794 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3797 gcc_assert (i0
< 2 && i1
< 2);
3798 subop0
= XEXP (trueop0
, i0
);
3799 subop1
= XEXP (trueop0
, i1
);
3801 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3804 /* If we select one half of a vec_concat, return that. */
3806 if (GET_CODE (trueop0
) == VEC_CONCAT
3807 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3809 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 1)))
3811 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3813 rtx subop0
= XEXP (trueop0
, 0);
3814 rtx subop1
= XEXP (trueop0
, 1);
3815 machine_mode mode0
= GET_MODE (subop0
);
3816 machine_mode mode1
= GET_MODE (subop1
);
3817 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3818 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3820 bool success
= true;
3821 for (int i
= 1; i
< l0
; ++i
)
3823 rtx j
= XVECEXP (trueop1
, 0, i
);
3824 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3833 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3835 bool success
= true;
3836 for (int i
= 1; i
< l1
; ++i
)
3838 rtx j
= XVECEXP (trueop1
, 0, i
);
3839 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3851 if (XVECLEN (trueop1
, 0) == 1
3852 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3853 && GET_CODE (trueop0
) == VEC_CONCAT
)
3856 offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3858 /* Try to find the element in the VEC_CONCAT. */
3859 while (GET_MODE (vec
) != mode
3860 && GET_CODE (vec
) == VEC_CONCAT
)
3862 poly_int64 vec_size
;
3864 if (CONST_INT_P (XEXP (vec
, 0)))
3866 /* vec_concat of two const_ints doesn't make sense with
3867 respect to modes. */
3868 if (CONST_INT_P (XEXP (vec
, 1)))
3871 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3872 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3875 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3877 if (known_lt (offset
, vec_size
))
3878 vec
= XEXP (vec
, 0);
3879 else if (known_ge (offset
, vec_size
))
3882 vec
= XEXP (vec
, 1);
3886 vec
= avoid_constant_pool_reference (vec
);
3889 if (GET_MODE (vec
) == mode
)
3893 /* If we select elements in a vec_merge that all come from the same
3894 operand, select from that operand directly. */
3895 if (GET_CODE (op0
) == VEC_MERGE
)
3897 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3898 if (CONST_INT_P (trueop02
))
3900 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3901 bool all_operand0
= true;
3902 bool all_operand1
= true;
3903 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3905 rtx j
= XVECEXP (trueop1
, 0, i
);
3906 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
3907 all_operand1
= false;
3909 all_operand0
= false;
3911 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3912 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3913 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3914 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
      /* If we have two nested selects that are inverses of each
	 other, replace them with the source operand.  */
3920 if (GET_CODE (trueop0
) == VEC_SELECT
3921 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3923 rtx op0_subop1
= XEXP (trueop0
, 1);
3924 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3925 gcc_assert (known_eq (XVECLEN (trueop1
, 0), GET_MODE_NUNITS (mode
)));
	  /* Apply the outer ordering vector to the inner one.  (The inner
	     ordering vector is expressly permitted to be of a different
	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
	     then the two VEC_SELECTs cancel.  */
3931 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3933 rtx x
= XVECEXP (trueop1
, 0, i
);
3934 if (!CONST_INT_P (x
))
3936 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3937 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3940 return XEXP (trueop0
, 0);
3946 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3947 ? GET_MODE (trueop0
)
3948 : GET_MODE_INNER (mode
));
3949 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3950 ? GET_MODE (trueop1
)
3951 : GET_MODE_INNER (mode
));
3953 gcc_assert (VECTOR_MODE_P (mode
));
3954 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode
)
3955 + GET_MODE_SIZE (op1_mode
),
3956 GET_MODE_SIZE (mode
)));
3958 if (VECTOR_MODE_P (op0_mode
))
3959 gcc_assert (GET_MODE_INNER (mode
)
3960 == GET_MODE_INNER (op0_mode
));
3962 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3964 if (VECTOR_MODE_P (op1_mode
))
3965 gcc_assert (GET_MODE_INNER (mode
)
3966 == GET_MODE_INNER (op1_mode
));
3968 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3970 unsigned int n_elts
, in_n_elts
;
3971 if ((GET_CODE (trueop0
) == CONST_VECTOR
3972 || CONST_SCALAR_INT_P (trueop0
)
3973 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3974 && (GET_CODE (trueop1
) == CONST_VECTOR
3975 || CONST_SCALAR_INT_P (trueop1
)
3976 || CONST_DOUBLE_AS_FLOAT_P (trueop1
))
3977 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
)
3978 && GET_MODE_NUNITS (op0_mode
).is_constant (&in_n_elts
))
3980 rtvec v
= rtvec_alloc (n_elts
);
3982 for (i
= 0; i
< n_elts
; i
++)
3986 if (!VECTOR_MODE_P (op0_mode
))
3987 RTVEC_ELT (v
, i
) = trueop0
;
3989 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3993 if (!VECTOR_MODE_P (op1_mode
))
3994 RTVEC_ELT (v
, i
) = trueop1
;
3996 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
4001 return gen_rtx_CONST_VECTOR (mode
, v
);
      /* Try to merge two VEC_SELECTs from the same vector into a single one.
	 Restrict the transformation to avoid generating a VEC_SELECT with a
	 mode unrelated to its operand.  */
4007 if (GET_CODE (trueop0
) == VEC_SELECT
4008 && GET_CODE (trueop1
) == VEC_SELECT
4009 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
4010 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
4012 rtx par0
= XEXP (trueop0
, 1);
4013 rtx par1
= XEXP (trueop1
, 1);
4014 int len0
= XVECLEN (par0
, 0);
4015 int len1
= XVECLEN (par1
, 0);
4016 rtvec vec
= rtvec_alloc (len0
+ len1
);
4017 for (int i
= 0; i
< len0
; i
++)
4018 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
4019 for (int i
= 0; i
< len1
; i
++)
4020 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
4021 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
4022 gen_rtx_PARALLEL (VOIDmode
, vec
));
  if (mode == GET_MODE (op0)
      && mode == GET_MODE (op1)
      && vec_duplicate_p (op0, &elt0)
      && vec_duplicate_p (op1, &elt1))
    {
      /* Try applying the operator to ELT and see if that simplifies.
	 We can duplicate the result if so.

	 The reason we don't use simplify_gen_binary is that it isn't
	 necessarily a win to convert things like:

	   (plus:V (vec_duplicate:V (reg:S R1))
		   (vec_duplicate:V (reg:S R2)))

	 to:

	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))

	 The first might be done entirely in vector registers while the
	 second might need a move between register files.  */
      tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
				       elt0, elt1);
      if (tem)
	return gen_vec_duplicate (mode, tem);
    }

  return 0;
}
/* Return true if binary operation OP distributes over addition in operand
   OPNO, with the other operand being held constant.  OPNO counts from 1.  */

static bool
distributes_over_addition_p (rtx_code op, int opno)
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      bool step_ok_p;
      if (CONST_VECTOR_STEPPED_P (op0)
	  && CONST_VECTOR_STEPPED_P (op1))
	/* We can operate directly on the encoding if:

	      a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
	    implies
	      (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)

	   Addition and subtraction are the supported operators
	   for which this is true.  */
	step_ok_p = (code == PLUS || code == MINUS);
      else if (CONST_VECTOR_STEPPED_P (op0))
	/* We can operate directly on stepped encodings if:

	     a3 - a2 == a2 - a1
	   implies:
	     (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)

	   which is true if (x -> x op c) distributes over addition.  */
	step_ok_p = distributes_over_addition_p (code, 1);
      else
	/* Similarly in reverse.  */
	step_ok_p = distributes_over_addition_p (code, 2);

      rtx_vector_builder builder;
      if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
	return 0;

      unsigned int count = builder.encoded_nelts ();
      for (unsigned int i = 0; i < count; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x || !valid_for_const_vector_p (mode, x))
	    return 0;
	  builder.quick_push (x);
	}
      return builder.build ();
    }
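      /* As a worked illustration: adding the stepped V4SI constant
	 {1, 2, 3, 4} to the duplicated constant {10, 10, 10, 10} only
	 requires folding the few encoded elements rather than all four
	 lanes; the stepped result {11, 12, 13, 14} follows because PLUS
	 distributes over the addition that defines the step.  */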
4131 if (VECTOR_MODE_P (mode
)
4132 && code
== VEC_CONCAT
4133 && (CONST_SCALAR_INT_P (op0
)
4134 || CONST_FIXED_P (op0
)
4135 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4136 && (CONST_SCALAR_INT_P (op1
)
4137 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4138 || CONST_FIXED_P (op1
)))
      /* Both inputs have a constant number of elements, so the result
	 must too.  */
4142 unsigned n_elts
= GET_MODE_NUNITS (mode
).to_constant ();
4143 rtvec v
= rtvec_alloc (n_elts
);
4145 gcc_assert (n_elts
>= 2);
4148 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4149 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4151 RTVEC_ELT (v
, 0) = op0
;
4152 RTVEC_ELT (v
, 1) = op1
;
4156 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
)).to_constant ();
4157 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
)).to_constant ();
4160 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4161 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4162 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4164 for (i
= 0; i
< op0_n_elts
; ++i
)
4165 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op0
, i
);
4166 for (i
= 0; i
< op1_n_elts
; ++i
)
4167 RTVEC_ELT (v
, op0_n_elts
+i
) = CONST_VECTOR_ELT (op1
, i
);
4170 return gen_rtx_CONST_VECTOR (mode
, v
);
4173 if (SCALAR_FLOAT_MODE_P (mode
)
4174 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4175 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4176 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4187 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4189 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4191 for (i
= 0; i
< 4; i
++)
4208 real_from_target (&r
, tmp0
, mode
);
4209 return const_double_from_real_value (r
, mode
);
4213 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4214 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4217 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4218 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4220 if (HONOR_SNANS (mode
)
4221 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4222 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4225 real_convert (&f0
, mode
, opr0
);
4226 real_convert (&f1
, mode
, opr1
);
4229 && real_equal (&f1
, &dconst0
)
4230 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4233 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4234 && flag_trapping_math
4235 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4237 int s0
= REAL_VALUE_NEGATIVE (f0
);
4238 int s1
= REAL_VALUE_NEGATIVE (f1
);
4243 /* Inf + -Inf = NaN plus exception. */
4248 /* Inf - Inf = NaN plus exception. */
4253 /* Inf / Inf = NaN plus exception. */
4260 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4261 && flag_trapping_math
4262 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4263 || (REAL_VALUE_ISINF (f1
)
4264 && real_equal (&f0
, &dconst0
))))
4265 /* Inf * 0 = NaN plus exception. */
4268 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4270 real_convert (&result
, mode
, &value
);
4272 /* Don't constant fold this floating point operation if
4273 the result has overflowed and flag_trapping_math. */
4275 if (flag_trapping_math
4276 && MODE_HAS_INFINITIES (mode
)
4277 && REAL_VALUE_ISINF (result
)
4278 && !REAL_VALUE_ISINF (f0
)
4279 && !REAL_VALUE_ISINF (f1
))
4280 /* Overflow plus exception. */
      /* Don't constant fold this floating point operation if the
	 result may be dependent upon the run-time rounding mode and
	 flag_rounding_math is set, or if GCC's software emulation
	 is unable to accurately represent the result.  */
4288 if ((flag_rounding_math
4289 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4290 && (inexact
|| !real_identical (&result
, &value
)))
4293 return const_double_from_real_value (result
, mode
);
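  /* As an illustration: with flag_rounding_math set, an inexact fold such
     as 1.0/3.0 is left alone, since its value would differ between the
     run-time rounding modes, while an exact fold such as 1.5 + 0.25 is
     still performed.  */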
4297 /* We can fold some multi-word operations. */
4298 scalar_int_mode int_mode
;
4299 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
4300 && CONST_SCALAR_INT_P (op0
)
4301 && CONST_SCALAR_INT_P (op1
))
4304 wi::overflow_type overflow
;
4305 rtx_mode_t pop0
= rtx_mode_t (op0
, int_mode
);
4306 rtx_mode_t pop1
= rtx_mode_t (op1
, int_mode
);
4308 #if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert triggers,
	 you just need to make the port support wide int.  */
4315 gcc_assert (GET_MODE_PRECISION (int_mode
) <= HOST_BITS_PER_DOUBLE_INT
);
4320 result
= wi::sub (pop0
, pop1
);
4324 result
= wi::add (pop0
, pop1
);
4328 result
= wi::mul (pop0
, pop1
);
4332 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4338 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4344 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4350 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4356 result
= wi::bit_and (pop0
, pop1
);
4360 result
= wi::bit_or (pop0
, pop1
);
4364 result
= wi::bit_xor (pop0
, pop1
);
4368 result
= wi::smin (pop0
, pop1
);
4372 result
= wi::smax (pop0
, pop1
);
4376 result
= wi::umin (pop0
, pop1
);
4380 result
= wi::umax (pop0
, pop1
);
4387 wide_int wop1
= pop1
;
4388 if (SHIFT_COUNT_TRUNCATED
)
4389 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
4390 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
4396 result
= wi::lrshift (pop0
, wop1
);
4400 result
= wi::arshift (pop0
, wop1
);
4404 result
= wi::lshift (pop0
, wop1
);
4415 if (wi::neg_p (pop1
))
4421 result
= wi::lrotate (pop0
, pop1
);
4425 result
= wi::rrotate (pop0
, pop1
);
4436 return immed_wide_int_const (result
, int_mode
);
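  /* As an illustration: in SImode, (ashift (const_int 1) (const_int 33))
     folds to (const_int 2) only when SHIFT_COUNT_TRUNCATED is nonzero, in
     which case the count is reduced modulo the precision (33 % 32 == 1);
     otherwise the out-of-range shift is left unsimplified.  */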
4439 /* Handle polynomial integers. */
4440 if (NUM_POLY_INT_COEFFS
> 1
4441 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4442 && poly_int_rtx_p (op0
)
4443 && poly_int_rtx_p (op1
))
4445 poly_wide_int result
;
4449 result
= wi::to_poly_wide (op0
, mode
) + wi::to_poly_wide (op1
, mode
);
4453 result
= wi::to_poly_wide (op0
, mode
) - wi::to_poly_wide (op1
, mode
);
4457 if (CONST_SCALAR_INT_P (op1
))
4458 result
= wi::to_poly_wide (op0
, mode
) * rtx_mode_t (op1
, mode
);
4464 if (CONST_SCALAR_INT_P (op1
))
4466 wide_int shift
= rtx_mode_t (op1
, mode
);
4467 if (SHIFT_COUNT_TRUNCATED
)
4468 shift
= wi::umod_trunc (shift
, GET_MODE_PRECISION (int_mode
));
4469 else if (wi::geu_p (shift
, GET_MODE_PRECISION (int_mode
)))
4471 result
= wi::to_poly_wide (op0
, mode
) << shift
;
4478 if (!CONST_SCALAR_INT_P (op1
)
4479 || !can_ior_p (wi::to_poly_wide (op0
, mode
),
4480 rtx_mode_t (op1
, mode
), &result
))
4487 return immed_wide_int_const (result
, int_mode
);
4495 /* Return a positive integer if X should sort after Y. The value
4496 returned is 1 if and only if X and Y are both regs. */
4499 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4503 result
= (commutative_operand_precedence (y
)
4504 - commutative_operand_precedence (x
));
4506 return result
+ result
;
4508 /* Group together equal REGs to do more simplification. */
4509 if (REG_P (x
) && REG_P (y
))
4510 return REGNO (x
) > REGNO (y
);
4515 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4516 operands may be another PLUS or MINUS.
   Rather than test for specific cases, we do this by a brute-force method
4519 and do all possible simplifications until no more changes occur. Then
4520 we rebuild the operation.
4522 May return NULL_RTX when no changes were made. */
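/* As an illustration: for (minus (plus (reg A) (reg B)) (reg C)) the
   routine collects the operand list { A:+, B:+, C:- }, simplifies pairs of
   entries where possible, and then rebuilds the expression from whatever
   operands remain.  */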
4525 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4528 struct simplify_plus_minus_op_data
4535 int changed
, n_constants
, canonicalized
= 0;
4538 memset (ops
, 0, sizeof ops
);
4540 /* Set up the two operands and then expand them until nothing has been
4541 changed. If we run out of room in our array, give up; this should
4542 almost never happen. */
4547 ops
[1].neg
= (code
== MINUS
);
4554 for (i
= 0; i
< n_ops
; i
++)
4556 rtx this_op
= ops
[i
].op
;
4557 int this_neg
= ops
[i
].neg
;
4558 enum rtx_code this_code
= GET_CODE (this_op
);
4564 if (n_ops
== ARRAY_SIZE (ops
))
4567 ops
[n_ops
].op
= XEXP (this_op
, 1);
4568 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4571 ops
[i
].op
= XEXP (this_op
, 0);
4573 /* If this operand was negated then we will potentially
4574 canonicalize the expression. Similarly if we don't
4575 place the operands adjacent we're re-ordering the
4576 expression and thus might be performing a
4577 canonicalization. Ignore register re-ordering.
4578 ??? It might be better to shuffle the ops array here,
4579 but then (plus (plus (A, B), plus (C, D))) wouldn't
4580 be seen as non-canonical. */
4583 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
4588 ops
[i
].op
= XEXP (this_op
, 0);
4589 ops
[i
].neg
= ! this_neg
;
4595 if (n_ops
!= ARRAY_SIZE (ops
)
4596 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4597 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4598 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4600 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4601 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4602 ops
[n_ops
].neg
= this_neg
;
4610 /* ~a -> (-a - 1) */
4611 if (n_ops
!= ARRAY_SIZE (ops
))
4613 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4614 ops
[n_ops
++].neg
= this_neg
;
4615 ops
[i
].op
= XEXP (this_op
, 0);
4616 ops
[i
].neg
= !this_neg
;
4626 ops
[i
].op
= neg_const_int (mode
, this_op
);
4640 if (n_constants
> 1)
4643 gcc_assert (n_ops
>= 2);
4645 /* If we only have two operands, we can avoid the loops. */
4648 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4651 /* Get the two operands. Be careful with the order, especially for
4652 the cases where code == MINUS. */
4653 if (ops
[0].neg
&& ops
[1].neg
)
4655 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4658 else if (ops
[0].neg
)
4669 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4672 /* Now simplify each pair of operands until nothing changes. */
4675 /* Insertion sort is good enough for a small array. */
4676 for (i
= 1; i
< n_ops
; i
++)
4678 struct simplify_plus_minus_op_data save
;
4682 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
4685 /* Just swapping registers doesn't count as canonicalization. */
4691 ops
[j
+ 1] = ops
[j
];
4693 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
4698 for (i
= n_ops
- 1; i
> 0; i
--)
4699 for (j
= i
- 1; j
>= 0; j
--)
4701 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4702 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4704 if (lhs
!= 0 && rhs
!= 0)
4706 enum rtx_code ncode
= PLUS
;
4712 std::swap (lhs
, rhs
);
4714 else if (swap_commutative_operands_p (lhs
, rhs
))
4715 std::swap (lhs
, rhs
);
4717 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4718 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4720 rtx tem_lhs
, tem_rhs
;
4722 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4723 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4724 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
4727 if (tem
&& !CONSTANT_P (tem
))
4728 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4731 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4735 /* Reject "simplifications" that just wrap the two
4736 arguments in a CONST. Failure to do so can result
4737 in infinite recursion with simplify_binary_operation
4738 when it calls us to simplify CONST operations.
4739 Also, if we find such a simplification, don't try
4740 any more combinations with this rhs: We must have
4741 something like symbol+offset, ie. one of the
4742 trivial CONST expressions we handle later. */
4743 if (GET_CODE (tem
) == CONST
4744 && GET_CODE (XEXP (tem
, 0)) == ncode
4745 && XEXP (XEXP (tem
, 0), 0) == lhs
4746 && XEXP (XEXP (tem
, 0), 1) == rhs
)
4749 if (GET_CODE (tem
) == NEG
)
4750 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4751 if (CONST_INT_P (tem
) && lneg
)
4752 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4756 ops
[j
].op
= NULL_RTX
;
4766 /* Pack all the operands to the lower-numbered entries. */
4767 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4776 /* If nothing changed, check that rematerialization of rtl instructions
4777 is still required. */
      /* Perform rematerialization only if all operands are registers and
	 all operations are PLUS.  */
4782 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4783 around rs6000 and how it uses the CA register. See PR67145. */
4784 for (i
= 0; i
< n_ops
; i
++)
4786 || !REG_P (ops
[i
].op
)
4787 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
4788 && fixed_regs
[REGNO (ops
[i
].op
)]
4789 && !global_regs
[REGNO (ops
[i
].op
)]
4790 && ops
[i
].op
!= frame_pointer_rtx
4791 && ops
[i
].op
!= arg_pointer_rtx
4792 && ops
[i
].op
!= stack_pointer_rtx
))
4797 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4799 && CONST_INT_P (ops
[1].op
)
4800 && CONSTANT_P (ops
[0].op
)
4802 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4804 /* We suppressed creation of trivial CONST expressions in the
4805 combination loop to avoid recursion. Create one manually now.
4806 The combination loop should have ensured that there is exactly
4807 one CONST_INT, and the sort will have ensured that it is last
4808 in the array and that any other constant will be next-to-last. */
4811 && CONST_INT_P (ops
[n_ops
- 1].op
)
4812 && CONSTANT_P (ops
[n_ops
- 2].op
))
4814 rtx value
= ops
[n_ops
- 1].op
;
4815 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4816 value
= neg_const_int (mode
, value
);
4817 if (CONST_INT_P (value
))
4819 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4825 /* Put a non-negated operand first, if possible. */
4827 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4830 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4839 /* Now make the result by performing the requested operations. */
4842 for (i
= 1; i
< n_ops
; i
++)
4843 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4844 mode
, result
, ops
[i
].op
);
4849 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4851 plus_minus_operand_p (const_rtx x
)
4853 return GET_CODE (x
) == PLUS
4854 || GET_CODE (x
) == MINUS
4855 || (GET_CODE (x
) == CONST
4856 && GET_CODE (XEXP (x
, 0)) == PLUS
4857 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4858 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode as well.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
4870 simplify_relational_operation (enum rtx_code code
, machine_mode mode
,
4871 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4873 rtx tem
, trueop0
, trueop1
;
4875 if (cmp_mode
== VOIDmode
)
4876 cmp_mode
= GET_MODE (op0
);
4877 if (cmp_mode
== VOIDmode
)
4878 cmp_mode
= GET_MODE (op1
);
4880 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4883 if (SCALAR_FLOAT_MODE_P (mode
))
4885 if (tem
== const0_rtx
)
4886 return CONST0_RTX (mode
);
4887 #ifdef FLOAT_STORE_FLAG_VALUE
4889 REAL_VALUE_TYPE val
;
4890 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4891 return const_double_from_real_value (val
, mode
);
4897 if (VECTOR_MODE_P (mode
))
4899 if (tem
== const0_rtx
)
4900 return CONST0_RTX (mode
);
4901 #ifdef VECTOR_STORE_FLAG_VALUE
4903 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4904 if (val
== NULL_RTX
)
4906 if (val
== const1_rtx
)
4907 return CONST1_RTX (mode
);
4909 return gen_const_vec_duplicate (mode
, val
);
4919 /* For the following tests, ensure const0_rtx is op1. */
4920 if (swap_commutative_operands_p (op0
, op1
)
4921 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4922 std::swap (op0
, op1
), code
= swap_condition (code
);
4924 /* If op0 is a compare, extract the comparison arguments from it. */
4925 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4926 return simplify_gen_relational (code
, mode
, VOIDmode
,
4927 XEXP (op0
, 0), XEXP (op0
, 1));
4929 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4933 trueop0
= avoid_constant_pool_reference (op0
);
4934 trueop1
= avoid_constant_pool_reference (op1
);
4935 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
4946 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
4947 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4949 enum rtx_code op0code
= GET_CODE (op0
);
4951 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4953 /* If op0 is a comparison, extract the comparison arguments
4957 if (GET_MODE (op0
) == mode
)
4958 return simplify_rtx (op0
);
4960 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4961 XEXP (op0
, 0), XEXP (op0
, 1));
4963 else if (code
== EQ
)
4965 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
4966 if (new_code
!= UNKNOWN
)
4967 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4968 XEXP (op0
, 0), XEXP (op0
, 1));
4972 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4973 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4974 if ((code
== LTU
|| code
== GEU
)
4975 && GET_CODE (op0
) == PLUS
4976 && CONST_INT_P (XEXP (op0
, 1))
4977 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4978 || rtx_equal_p (op1
, XEXP (op0
, 1)))
4979 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4980 && XEXP (op0
, 1) != const0_rtx
)
4983 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4984 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4985 cmp_mode
, XEXP (op0
, 0), new_cmp
);
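  /* Worked instance of the rule above: in SImode,
     (ltu (plus a (const_int 1)) (const_int 1)) holds exactly when a + 1
     wraps around zero, i.e. when a is 0xffffffff, so it becomes
     (geu a (const_int -1)); in general (ltu (plus a C) C) is rewritten
     as (geu a (neg C)).  */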
4988 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4989 transformed into (LTU a -C). */
4990 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
4991 && CONST_INT_P (XEXP (op0
, 1))
4992 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
4993 && XEXP (op0
, 1) != const0_rtx
)
4996 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4997 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
4998 XEXP (op0
, 0), new_cmp
);
5001 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5002 if ((code
== LTU
|| code
== GEU
)
5003 && GET_CODE (op0
) == PLUS
5004 && rtx_equal_p (op1
, XEXP (op0
, 1))
5005 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5006 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
5007 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
5008 copy_rtx (XEXP (op0
, 0)));
5010 if (op1
== const0_rtx
)
5012 /* Canonicalize (GTU x 0) as (NE x 0). */
5014 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
5015 /* Canonicalize (LEU x 0) as (EQ x 0). */
5017 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
5019 else if (op1
== const1_rtx
)
5024 /* Canonicalize (GE x 1) as (GT x 0). */
5025 return simplify_gen_relational (GT
, mode
, cmp_mode
,
5028 /* Canonicalize (GEU x 1) as (NE x 0). */
5029 return simplify_gen_relational (NE
, mode
, cmp_mode
,
5032 /* Canonicalize (LT x 1) as (LE x 0). */
5033 return simplify_gen_relational (LE
, mode
, cmp_mode
,
5036 /* Canonicalize (LTU x 1) as (EQ x 0). */
5037 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
5043 else if (op1
== constm1_rtx
)
5045 /* Canonicalize (LE x -1) as (LT x 0). */
5047 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
5048 /* Canonicalize (GT x -1) as (GE x 0). */
5050 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
5053 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5054 if ((code
== EQ
|| code
== NE
)
5055 && (op0code
== PLUS
|| op0code
== MINUS
)
5057 && CONSTANT_P (XEXP (op0
, 1))
5058 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
5060 rtx x
= XEXP (op0
, 0);
5061 rtx c
= XEXP (op0
, 1);
5062 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
5063 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
5065 /* Detect an infinite recursive condition, where we oscillate at this
5066 simplification case between:
5067 A + B == C <---> C - B == A,
5068 where A, B, and C are all constants with non-simplifiable expressions,
5069 usually SYMBOL_REFs. */
5070 if (GET_CODE (tem
) == invcode
5072 && rtx_equal_p (c
, XEXP (tem
, 1)))
5075 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
5078 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
5079 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5080 scalar_int_mode int_mode
, int_cmp_mode
;
5082 && op1
== const0_rtx
5083 && is_int_mode (mode
, &int_mode
)
5084 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
5085 /* ??? Work-around BImode bugs in the ia64 backend. */
5086 && int_mode
!= BImode
5087 && int_cmp_mode
!= BImode
5088 && nonzero_bits (op0
, int_cmp_mode
) == 1
5089 && STORE_FLAG_VALUE
== 1)
5090 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
5091 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
5092 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
5094 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5095 if ((code
== EQ
|| code
== NE
)
5096 && op1
== const0_rtx
5098 return simplify_gen_relational (code
, mode
, cmp_mode
,
5099 XEXP (op0
, 0), XEXP (op0
, 1));
5101 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5102 if ((code
== EQ
|| code
== NE
)
5104 && rtx_equal_p (XEXP (op0
, 0), op1
)
5105 && !side_effects_p (XEXP (op0
, 0)))
5106 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
5109 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5110 if ((code
== EQ
|| code
== NE
)
5112 && rtx_equal_p (XEXP (op0
, 1), op1
)
5113 && !side_effects_p (XEXP (op0
, 1)))
5114 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5117 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5118 if ((code
== EQ
|| code
== NE
)
5120 && CONST_SCALAR_INT_P (op1
)
5121 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
5122 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5123 simplify_gen_binary (XOR
, cmp_mode
,
5124 XEXP (op0
, 1), op1
));
5126 /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5127 constant folding if x/y is a constant. */
5128 if ((code
== EQ
|| code
== NE
)
5129 && (op0code
== AND
|| op0code
== IOR
)
5130 && !side_effects_p (op1
)
5131 && op1
!= CONST0_RTX (cmp_mode
))
5133 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5134 (eq/ne (and (not y) x) 0). */
5135 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 0), op1
))
5136 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 1), op1
)))
5138 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1),
5140 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
5142 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5143 CONST0_RTX (cmp_mode
));
5146 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5147 (eq/ne (and (not x) y) 0). */
5148 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 1), op1
))
5149 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 0), op1
)))
5151 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0),
5153 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
5155 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5156 CONST0_RTX (cmp_mode
));
5160 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5161 if ((code
== EQ
|| code
== NE
)
5162 && GET_CODE (op0
) == BSWAP
5163 && CONST_SCALAR_INT_P (op1
))
5164 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5165 simplify_gen_unary (BSWAP
, cmp_mode
,
5168 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5169 if ((code
== EQ
|| code
== NE
)
5170 && GET_CODE (op0
) == BSWAP
5171 && GET_CODE (op1
) == BSWAP
)
5172 return simplify_gen_relational (code
, mode
, cmp_mode
,
5173 XEXP (op0
, 0), XEXP (op1
, 0));
5175 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
5181 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5182 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
5183 XEXP (op0
, 0), const0_rtx
);
5188 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5189 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
5190 XEXP (op0
, 0), const0_rtx
);
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */
5216 comparison_result (enum rtx_code code
, int known_results
)
5222 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
5225 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
5229 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
5232 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
5236 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
5239 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
5242 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
5244 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
5247 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
5249 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
5252 return const_true_rtx
;
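/* As an illustration: comparing the SImode constants -1 and 1 gives
   KNOWN_RESULTS == (CMP_LT | CMP_GTU), since -1 < 1 as a signed value but
   0xffffffff > 1 as an unsigned value; comparison_result then folds LT, LE,
   GTU and GEU to const_true_rtx and GT, GE, LTU and LEU to const0_rtx.  */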
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */
5267 simplify_const_relational_operation (enum rtx_code code
,
5275 gcc_assert (mode
!= VOIDmode
5276 || (GET_MODE (op0
) == VOIDmode
5277 && GET_MODE (op1
) == VOIDmode
));
5279 /* If op0 is a compare, extract the comparison arguments from it. */
5280 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5282 op1
= XEXP (op0
, 1);
5283 op0
= XEXP (op0
, 0);
5285 if (GET_MODE (op0
) != VOIDmode
)
5286 mode
= GET_MODE (op0
);
5287 else if (GET_MODE (op1
) != VOIDmode
)
5288 mode
= GET_MODE (op1
);
5293 /* We can't simplify MODE_CC values since we don't know what the
5294 actual comparison is. */
5295 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
5298 /* Make sure the constant is second. */
5299 if (swap_commutative_operands_p (op0
, op1
))
5301 std::swap (op0
, op1
);
5302 code
= swap_condition (code
);
5305 trueop0
= avoid_constant_pool_reference (op0
);
5306 trueop1
= avoid_constant_pool_reference (op1
);
5308 /* For integer comparisons of A and B maybe we can simplify A - B and can
5309 then simplify a comparison of that with zero. If A and B are both either
5310 a register or a CONST_INT, this can't help; testing for these cases will
5311 prevent infinite recursion here and speed things up.
5313 We can only do this for EQ and NE comparisons as otherwise we may
5314 lose or introduce overflow which we cannot disregard as undefined as
5315 we do not know the signedness of the operation on either the left or
5316 the right hand side of the comparison. */
5318 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5319 && (code
== EQ
|| code
== NE
)
5320 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5321 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5322 && (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
)) != 0
5323 /* We cannot do this if tem is a nonzero address. */
5324 && ! nonzero_address_p (tem
))
5325 return simplify_const_relational_operation (signed_condition (code
),
5326 mode
, tem
, const0_rtx
);
5328 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5329 return const_true_rtx
;
5331 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
5334 /* For modes without NaNs, if the two operands are equal, we know the
5335 result except if they have side-effects. Even with NaNs we know
5336 the result of unordered comparisons and, if signaling NaNs are
5337 irrelevant, also the result of LT/GT/LTGT. */
5338 if ((! HONOR_NANS (trueop0
)
5339 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5340 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5341 && ! HONOR_SNANS (trueop0
)))
5342 && rtx_equal_p (trueop0
, trueop1
)
5343 && ! side_effects_p (trueop0
))
5344 return comparison_result (code
, CMP_EQ
);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
5352 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5353 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5355 /* Comparisons are unordered iff at least one of the values is NaN. */
5356 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
5366 return const_true_rtx
;
5379 return comparison_result (code
,
5380 (real_equal (d0
, d1
) ? CMP_EQ
:
5381 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
5384 /* Otherwise, see if the operands are both integers. */
5385 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
5386 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5392 rtx_mode_t ptrueop0
= rtx_mode_t (trueop0
, cmode
);
5393 rtx_mode_t ptrueop1
= rtx_mode_t (trueop1
, cmode
);
5395 if (wi::eq_p (ptrueop0
, ptrueop1
))
5396 return comparison_result (code
, CMP_EQ
);
5399 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
5400 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
5401 return comparison_result (code
, cr
);
5405 /* Optimize comparisons with upper and lower bounds. */
5406 scalar_int_mode int_mode
;
5407 if (CONST_INT_P (trueop1
)
5408 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5409 && HWI_COMPUTABLE_MODE_P (int_mode
)
5410 && !side_effects_p (trueop0
))
5413 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
5414 HOST_WIDE_INT val
= INTVAL (trueop1
);
5415 HOST_WIDE_INT mmin
, mmax
;
5425 /* Get a reduced range if the sign bit is zero. */
5426 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
5433 rtx mmin_rtx
, mmax_rtx
;
5434 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
5436 mmin
= INTVAL (mmin_rtx
);
5437 mmax
= INTVAL (mmax_rtx
);
5440 unsigned int sign_copies
5441 = num_sign_bit_copies (trueop0
, int_mode
);
5443 mmin
>>= (sign_copies
- 1);
5444 mmax
>>= (sign_copies
- 1);
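	  /* Worked illustration: if trueop0 is an SImode value known to have
	     17 sign-bit copies, shifting the bounds by sign_copies - 1 = 16
	     narrows [-0x80000000, 0x7fffffff] to [-0x8000, 0x7fff], letting
	     more comparisons against out-of-range constants fold to a known
	     result below.  */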
5450 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5452 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5453 return const_true_rtx
;
5454 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5459 return const_true_rtx
;
5464 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5466 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5467 return const_true_rtx
;
5468 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5473 return const_true_rtx
;
5479 /* x == y is always false for y out of range. */
5480 if (val
< mmin
|| val
> mmax
)
5484 /* x > y is always false for y >= mmax, always true for y < mmin. */
5486 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5488 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5489 return const_true_rtx
;
5495 return const_true_rtx
;
5498 /* x < y is always false for y <= mmin, always true for y > mmax. */
5500 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5502 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5503 return const_true_rtx
;
5509 return const_true_rtx
;
5513 /* x != y is always true for y out of range. */
5514 if (val
< mmin
|| val
> mmax
)
5515 return const_true_rtx
;
5523 /* Optimize integer comparisons with zero. */
5524 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
5525 && trueop1
== const0_rtx
5526 && !side_effects_p (trueop0
))
5528 /* Some addresses are known to be nonzero. We don't know
5529 their sign, but equality comparisons are known. */
5530 if (nonzero_address_p (trueop0
))
5532 if (code
== EQ
|| code
== LEU
)
5534 if (code
== NE
|| code
== GTU
)
5535 return const_true_rtx
;
5538 /* See if the first operand is an IOR with a constant. If so, we
5539 may be able to determine the result of this comparison. */
5540 if (GET_CODE (op0
) == IOR
)
5542 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5543 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5545 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
5546 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5547 && (UINTVAL (inner_const
)
5558 return const_true_rtx
;
5562 return const_true_rtx
;
5576 /* Optimize comparison of ABS with zero. */
5577 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
5578 && (GET_CODE (trueop0
) == ABS
5579 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5580 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5585 /* Optimize abs(x) < 0.0. */
5586 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
5591 /* Optimize abs(x) >= 0.0. */
5592 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
5593 return const_true_rtx
;
5597 /* Optimize ! (abs(x) < 0.0). */
5598 return const_true_rtx
;
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X), where OP
   is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO or
   CTZ_DEFINED_VALUE_AT_ZERO respectively.  Return OP (X) if the
   expression can be simplified to that, or NULL_RTX if it cannot.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */
5616 simplify_cond_clz_ctz (rtx x
, rtx_code cmp_code
, rtx true_val
, rtx false_val
)
5618 if (cmp_code
!= EQ
&& cmp_code
!= NE
)
5621 /* Result on X == 0 and X !=0 respectively. */
5622 rtx on_zero
, on_nonzero
;
5626 on_nonzero
= false_val
;
5630 on_zero
= false_val
;
5631 on_nonzero
= true_val
;
5634 rtx_code op_code
= GET_CODE (on_nonzero
);
5635 if ((op_code
!= CLZ
&& op_code
!= CTZ
)
5636 || !rtx_equal_p (XEXP (on_nonzero
, 0), x
)
5637 || !CONST_INT_P (on_zero
))
5640 HOST_WIDE_INT op_val
;
5641 scalar_int_mode mode ATTRIBUTE_UNUSED
5642 = as_a
<scalar_int_mode
> (GET_MODE (XEXP (on_nonzero
, 0)));
5643 if (((op_code
== CLZ
&& CLZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
))
5644 || (op_code
== CTZ
&& CTZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
)))
5645 && op_val
== INTVAL (on_zero
))
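/* As an illustration: on a hypothetical target whose
   CLZ_DEFINED_VALUE_AT_ZERO sets the value to 32 for SImode,
   (x == 0) ? 32 : (clz x) collapses to (clz x), because the defined value
   at zero already matches the constant arm.  */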
/* Try to simplify X given that it appears within operand OP of a
   VEC_MERGE operation whose mask is MASK.  X need not use the same
   vector mode as the VEC_MERGE, but it must have the same number of
   elements.

   Return the simplified X on success, otherwise return NULL_RTX.  */
5659 simplify_merge_mask (rtx x
, rtx mask
, int op
)
5661 gcc_assert (VECTOR_MODE_P (GET_MODE (x
)));
5662 poly_uint64 nunits
= GET_MODE_NUNITS (GET_MODE (x
));
5663 if (GET_CODE (x
) == VEC_MERGE
&& rtx_equal_p (XEXP (x
, 2), mask
))
5665 if (side_effects_p (XEXP (x
, 1 - op
)))
5668 return XEXP (x
, op
);
5671 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5672 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
))
5674 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5676 return simplify_gen_unary (GET_CODE (x
), GET_MODE (x
), top0
,
5677 GET_MODE (XEXP (x
, 0)));
5680 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5681 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
5682 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
5683 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
))
5685 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5686 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
5689 if (COMPARISON_P (x
))
5690 return simplify_gen_relational (GET_CODE (x
), GET_MODE (x
),
5691 GET_MODE (XEXP (x
, 0)) != VOIDmode
5692 ? GET_MODE (XEXP (x
, 0))
5693 : GET_MODE (XEXP (x
, 1)),
5694 top0
? top0
: XEXP (x
, 0),
5695 top1
? top1
: XEXP (x
, 1));
5697 return simplify_gen_binary (GET_CODE (x
), GET_MODE (x
),
5698 top0
? top0
: XEXP (x
, 0),
5699 top1
? top1
: XEXP (x
, 1));
5702 if (GET_RTX_CLASS (GET_CODE (x
)) == RTX_TERNARY
5703 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 0)))
5704 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 0))), nunits
)
5705 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 1)))
5706 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 1))), nunits
)
5707 && VECTOR_MODE_P (GET_MODE (XEXP (x
, 2)))
5708 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x
, 2))), nunits
))
5710 rtx top0
= simplify_merge_mask (XEXP (x
, 0), mask
, op
);
5711 rtx top1
= simplify_merge_mask (XEXP (x
, 1), mask
, op
);
5712 rtx top2
= simplify_merge_mask (XEXP (x
, 2), mask
, op
);
5713 if (top0
|| top1
|| top2
)
5714 return simplify_gen_ternary (GET_CODE (x
), GET_MODE (x
),
5715 GET_MODE (XEXP (x
, 0)),
5716 top0
? top0
: XEXP (x
, 0),
5717 top1
? top1
: XEXP (x
, 1),
5718 top2
? top2
: XEXP (x
, 2));
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
5729 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5730 machine_mode op0_mode
, rtx op0
, rtx op1
,
5733 bool any_change
= false;
5735 scalar_int_mode int_mode
, int_op0_mode
;
5736 unsigned int n_elts
;
5741 /* Simplify negations around the multiplication. */
5742 /* -a * -b + c => a * b + c. */
5743 if (GET_CODE (op0
) == NEG
)
5745 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5747 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5749 else if (GET_CODE (op1
) == NEG
)
5751 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5753 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5756 /* Canonicalize the two multiplication operands. */
5757 /* a * -b + c => -b * a + c. */
5758 if (swap_commutative_operands_p (op0
, op1
))
5759 std::swap (op0
, op1
), any_change
= true;
5762 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5767 if (CONST_INT_P (op0
)
5768 && CONST_INT_P (op1
)
5769 && CONST_INT_P (op2
)
5770 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5771 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
5772 && HWI_COMPUTABLE_MODE_P (int_mode
))
5774 /* Extracting a bit-field from a constant */
5775 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5776 HOST_WIDE_INT op1val
= INTVAL (op1
);
5777 HOST_WIDE_INT op2val
= INTVAL (op2
);
5778 if (!BITS_BIG_ENDIAN
)
5780 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
5781 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
5783 /* Not enough information to calculate the bit position. */
5786 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5788 /* First zero-extend. */
5789 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
5790 /* If desired, propagate sign bit. */
5791 if (code
== SIGN_EXTRACT
5792 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
5794 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
5797 return gen_int_mode (val
, int_mode
);
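	  /* Worked illustration on a !BITS_BIG_ENDIAN target:
	     (sign_extract:SI (const_int 0xf80) (const_int 8) (const_int 4))
	     shifts the constant right by 4 to get 0xf8, masks it to 8 bits
	     and, since bit 7 is set, sign-extends to (const_int -8);
	     a zero_extract of the same field yields (const_int 248).  */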
5802 if (CONST_INT_P (op0
))
5803 return op0
!= const0_rtx
? op1
: op2
;
5805 /* Convert c ? a : a into "a". */
5806 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5809 /* Convert a != b ? a : b into "a". */
5810 if (GET_CODE (op0
) == NE
5811 && ! side_effects_p (op0
)
5812 && ! HONOR_NANS (mode
)
5813 && ! HONOR_SIGNED_ZEROS (mode
)
5814 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5815 && rtx_equal_p (XEXP (op0
, 1), op2
))
5816 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5817 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5820 /* Convert a == b ? a : b into "b". */
5821 if (GET_CODE (op0
) == EQ
5822 && ! side_effects_p (op0
)
5823 && ! HONOR_NANS (mode
)
5824 && ! HONOR_SIGNED_ZEROS (mode
)
5825 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5826 && rtx_equal_p (XEXP (op0
, 1), op2
))
5827 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5828 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5831 /* Convert (!c) != {0,...,0} ? a : b into
5832 c != {0,...,0} ? b : a for vector modes. */
5833 if (VECTOR_MODE_P (GET_MODE (op1
))
5834 && GET_CODE (op0
) == NE
5835 && GET_CODE (XEXP (op0
, 0)) == NOT
5836 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
5838 rtx cv
= XEXP (op0
, 1);
5841 if (!CONST_VECTOR_NUNITS (cv
).is_constant (&nunits
))
5844 for (int i
= 0; i
< nunits
; ++i
)
5845 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
5852 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
5853 XEXP (XEXP (op0
, 0), 0),
5855 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
5860 /* Convert x == 0 ? N : clz (x) into clz (x) when
5861 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5862 Similarly for ctz (x). */
5863 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
5864 && XEXP (op0
, 1) == const0_rtx
)
5867 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
5873 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5875 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5876 ? GET_MODE (XEXP (op0
, 1))
5877 : GET_MODE (XEXP (op0
, 0)));
5880 /* Look for happy constants in op1 and op2. */
5881 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5883 HOST_WIDE_INT t
= INTVAL (op1
);
5884 HOST_WIDE_INT f
= INTVAL (op2
);
5886 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5887 code
= GET_CODE (op0
);
5888 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5891 tmp
= reversed_comparison_code (op0
, NULL
);
5899 return simplify_gen_relational (code
, mode
, cmp_mode
,
5900 XEXP (op0
, 0), XEXP (op0
, 1));
5903 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5904 cmp_mode
, XEXP (op0
, 0),
5907 /* See if any simplifications were possible. */
5910 if (CONST_INT_P (temp
))
5911 return temp
== const0_rtx
? op2
: op1
;
5913 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5919 gcc_assert (GET_MODE (op0
) == mode
);
5920 gcc_assert (GET_MODE (op1
) == mode
);
5921 gcc_assert (VECTOR_MODE_P (mode
));
5922 trueop2
= avoid_constant_pool_reference (op2
);
5923 if (CONST_INT_P (trueop2
)
5924 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
5926 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
5927 unsigned HOST_WIDE_INT mask
;
5928 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
5931 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
5933 if (!(sel
& mask
) && !side_effects_p (op0
))
5935 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
5938 rtx trueop0
= avoid_constant_pool_reference (op0
);
5939 rtx trueop1
= avoid_constant_pool_reference (op1
);
5940 if (GET_CODE (trueop0
) == CONST_VECTOR
5941 && GET_CODE (trueop1
) == CONST_VECTOR
)
5943 rtvec v
= rtvec_alloc (n_elts
);
5946 for (i
= 0; i
< n_elts
; i
++)
5947 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
5948 ? CONST_VECTOR_ELT (trueop0
, i
)
5949 : CONST_VECTOR_ELT (trueop1
, i
));
5950 return gen_rtx_CONST_VECTOR (mode
, v
);
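	  /* As an illustration: a V2SI vec_merge of the constant vectors
	     {1, 2} and {3, 4} with selector (const_int 1) takes element 0
	     from op0 (bit 0 set) and element 1 from op1 (bit 1 clear),
	     giving the constant vector {1, 4}.  */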
5953 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5954 if no element from a appears in the result. */
5955 if (GET_CODE (op0
) == VEC_MERGE
)
5957 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
5958 if (CONST_INT_P (tem
))
5960 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
5961 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
5962 return simplify_gen_ternary (code
, mode
, mode
,
5963 XEXP (op0
, 1), op1
, op2
);
5964 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
5965 return simplify_gen_ternary (code
, mode
, mode
,
5966 XEXP (op0
, 0), op1
, op2
);
5969 if (GET_CODE (op1
) == VEC_MERGE
)
5971 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
5972 if (CONST_INT_P (tem
))
5974 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
5975 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
5976 return simplify_gen_ternary (code
, mode
, mode
,
5977 op0
, XEXP (op1
, 1), op2
);
5978 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
5979 return simplify_gen_ternary (code
, mode
, mode
,
5980 op0
, XEXP (op1
, 0), op2
);
      /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	 with a.  */
5986 if (GET_CODE (op0
) == VEC_DUPLICATE
5987 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
5988 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
5989 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0
, 0))), 1))
5991 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
5992 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
5994 if (XEXP (XEXP (op0
, 0), 0) == op1
5995 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
      /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
			    (const_int N))
	 with (vec_concat (X) (B)) if N == 1 or
	 (vec_concat (A) (X)) if N == 2.  */
6003 if (GET_CODE (op0
) == VEC_DUPLICATE
6004 && GET_CODE (op1
) == CONST_VECTOR
6005 && known_eq (CONST_VECTOR_NUNITS (op1
), 2)
6006 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6007 && IN_RANGE (sel
, 1, 2))
6009 rtx newop0
= XEXP (op0
, 0);
6010 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
6012 std::swap (newop0
, newop1
);
6013 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6015 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6016 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6017 Only applies for vectors of two elements. */
6018 if (GET_CODE (op0
) == VEC_DUPLICATE
6019 && GET_CODE (op1
) == VEC_CONCAT
6020 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6021 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6022 && IN_RANGE (sel
, 1, 2))
6024 rtx newop0
= XEXP (op0
, 0);
6025 rtx newop1
= XEXP (op1
, 2 - sel
);
6026 rtx otherop
= XEXP (op1
, sel
- 1);
6028 std::swap (newop0
, newop1
);
6029 /* Don't want to throw away the other part of the vec_concat if
6030 it has side-effects. */
6031 if (!side_effects_p (otherop
))
6032 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
      /* Replace
	 (vec_merge:outer (vec_duplicate:outer x:inner)
			  (subreg:outer y:inner 0)
			  (const_int N))

	 with (vec_concat:outer x:inner y:inner) if N == 1,
	 or (vec_concat:outer y:inner x:inner) if N == 2.

	 Implicitly, this means we have a paradoxical subreg, but such
	 a check is cheap, so make it anyway.

	 Only applies for vectors of two elements.  */
6048 if (GET_CODE (op0
) == VEC_DUPLICATE
6049 && GET_CODE (op1
) == SUBREG
6050 && GET_MODE (op1
) == GET_MODE (op0
)
6051 && GET_MODE (SUBREG_REG (op1
)) == GET_MODE (XEXP (op0
, 0))
6052 && paradoxical_subreg_p (op1
)
6053 && subreg_lowpart_p (op1
)
6054 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6055 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6056 && IN_RANGE (sel
, 1, 2))
6058 rtx newop0
= XEXP (op0
, 0);
6059 rtx newop1
= SUBREG_REG (op1
);
6061 std::swap (newop0
, newop1
);
6062 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6065 /* Same as above but with switched operands:
6066 Replace (vec_merge:outer (subreg:outer x:inner 0)
6067 (vec_duplicate:outer y:inner)
6070 with (vec_concat:outer x:inner y:inner) if N == 1,
6071 or (vec_concat:outer y:inner x:inner) if N == 2. */
6072 if (GET_CODE (op1
) == VEC_DUPLICATE
6073 && GET_CODE (op0
) == SUBREG
6074 && GET_MODE (op0
) == GET_MODE (op1
)
6075 && GET_MODE (SUBREG_REG (op0
)) == GET_MODE (XEXP (op1
, 0))
6076 && paradoxical_subreg_p (op0
)
6077 && subreg_lowpart_p (op0
)
6078 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6079 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6080 && IN_RANGE (sel
, 1, 2))
6082 rtx newop0
= SUBREG_REG (op0
);
6083 rtx newop1
= XEXP (op1
, 0);
6085 std::swap (newop0
, newop1
);
6086 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
      /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
			    (const_int n))
	 with (vec_concat x y) or (vec_concat y x) depending on value
	 of N.  */
6093 if (GET_CODE (op0
) == VEC_DUPLICATE
6094 && GET_CODE (op1
) == VEC_DUPLICATE
6095 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
6096 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
6097 && IN_RANGE (sel
, 1, 2))
6099 rtx newop0
= XEXP (op0
, 0);
6100 rtx newop1
= XEXP (op1
, 0);
6102 std::swap (newop0
, newop1
);
6104 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
6108 if (rtx_equal_p (op0
, op1
)
6109 && !side_effects_p (op2
) && !side_effects_p (op1
))
6112 if (!side_effects_p (op2
))
6115 = may_trap_p (op0
) ? NULL_RTX
: simplify_merge_mask (op0
, op2
, 0);
6117 = may_trap_p (op1
) ? NULL_RTX
: simplify_merge_mask (op1
, op2
, 1);
6119 return simplify_gen_ternary (code
, mode
, mode
,
6121 top1
? top1
: op1
, op2
);
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  If OP is a CONST_VECTOR,
   FIRST_ELEM is the number of the first element to extract, otherwise
   FIRST_ELEM is ignored.  */
static rtx
simplify_immed_subreg (fixed_size_mode outermode, rtx op,
                       machine_mode innermode, unsigned int byte,
                       unsigned int first_elem, unsigned int inner_bytes)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s = NULL;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  scalar_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
                     inner_bytes * BITS_PER_UNIT);

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CEIL (inner_bytes, GET_MODE_UNIT_SIZE (innermode));
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = (GET_CODE (op) == CONST_VECTOR
                ? CONST_VECTOR_ELT (op, first_elem + elem)
                : op);

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_WIDE_INT:
          {
            rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
            unsigned char extend = wi::sign_mask (val);
            int prec = wi::get_precision (val);

            for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
              *vp++ = wi::extract_uhwi (val, i, value_bit);
            for (; i < elem_bitsize; i += value_bit)
              *vp++ = extend;
          }
          break;

        case CONST_DOUBLE:
          if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
            {
              unsigned char extend = 0;
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }

              if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
                extend = -1;
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = extend;
            }
          else
            {
              /* This is big enough for anything on the platform.  */
              long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
              scalar_float_mode el_mode;

              el_mode = as_a <scalar_float_mode> (GET_MODE (el));
              int bitsize = GET_MODE_BITSIZE (el_mode);

              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (inner_bytes >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = inner_bytes - GET_MODE_SIZE (outermode) - byte;
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < inner_bytes);

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            int u;
            int base = 0;
            int units
              = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
                / HOST_BITS_PER_WIDE_INT;
            HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
            wide_int r;

            if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
              return NULL_RTX;
            for (u = 0; u < units; u++)
              {
                unsigned HOST_WIDE_INT buf = 0;
                for (i = 0;
                     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
                     i += value_bit)
                  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

                tmp[u] = buf;
                base += HOST_BITS_PER_WIDE_INT;
              }
            r = wide_int::from_array (tmp, units,
                                      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
            /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
            if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
              return NULL_RTX;
#endif
            elems[elem] = immed_wide_int_const (r, outer_submode);
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = const_double_from_real_value (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
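/* For instance (with R a purely illustrative pseudo register):
   (subreg:SI (subreg:DI (reg:TI R) 0) 0) is collapsed by the nested-SUBREG
   handling below into a single (subreg:SI (reg:TI R) 0), and subregs of
   integer constants are evaluated outright via simplify_immed_subreg.  */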
rtx
simplify_subreg (machine_mode outermode, rtx op,
                 machine_mode innermode, poly_uint64 byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  poly_uint64 outersize = GET_MODE_SIZE (outermode);
  if (!multiple_p (byte, outersize))
    return NULL_RTX;

  poly_uint64 innersize = GET_MODE_SIZE (innermode);
  if (maybe_ge (byte, innersize))
    return NULL_RTX;

  if (outermode == innermode && known_eq (byte, 0U))
    return op;

  if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
    {
      rtx elt;

      if (VECTOR_MODE_P (outermode)
          && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
          && vec_duplicate_p (op, &elt))
        return gen_vec_duplicate (outermode, elt);

      if (outermode == GET_MODE_INNER (innermode)
          && vec_duplicate_p (op, &elt))
        return elt;
    }

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || CONST_FIXED_P (op)
      || GET_CODE (op) == CONST_VECTOR)
    {
      /* simplify_immed_subreg deconstructs OP into bytes and constructs
         the result from bytes, so it only works if the sizes of the modes
         and the value of the offset are known at compile time.  Cases that
         apply to general modes and offsets should be handled here
         before calling simplify_immed_subreg.  */
      fixed_size_mode fs_outermode, fs_innermode;
      unsigned HOST_WIDE_INT cbyte;
      if (is_a <fixed_size_mode> (outermode, &fs_outermode)
          && is_a <fixed_size_mode> (innermode, &fs_innermode)
          && byte.is_constant (&cbyte))
        return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte,
                                      0, GET_MODE_SIZE (fs_innermode));

      /* Handle constant-sized outer modes and variable-sized inner modes.  */
      unsigned HOST_WIDE_INT first_elem;
      if (GET_CODE (op) == CONST_VECTOR
          && is_a <fixed_size_mode> (outermode, &fs_outermode)
          && constant_multiple_p (byte, GET_MODE_UNIT_SIZE (innermode),
                                  &first_elem))
        return simplify_immed_subreg (fs_outermode, op, innermode, 0,
                                      first_elem,
                                      GET_MODE_SIZE (fs_outermode));

      return NULL_RTX;
    }

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
      rtx newx;

      if (outermode == innermostmode
          && known_eq (byte, 0U)
          && known_eq (SUBREG_BYTE (op), 0))
        return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
         to the inner value of OP.  */
      poly_int64 mem_offset = subreg_memory_offset (outermode,
                                                    innermode, byte);
      poly_int64 op_mem_offset = subreg_memory_offset (op);
      poly_int64 final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
        {
          /* Bail out in case resulting subreg would be incorrect.  */
          if (maybe_lt (final_offset, 0)
              || maybe_ge (poly_uint64 (final_offset), innermostsize)
              || !multiple_p (final_offset, outersize))
            return NULL_RTX;
        }
      else
        {
          poly_int64 required_offset = subreg_memory_offset (outermode,
                                                             innermostmode, 0);
          if (maybe_ne (final_offset, required_offset))
            return NULL_RTX;
          /* Paradoxical subregs always have byte offset 0.  */
          final_offset = 0;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_SIGN (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && known_ge (outersize, innersize)
              && known_le (outersize, innermostsize)
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
            }
          return newx;
        }
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
                                      subreg_memory_offset (outermode,
                                                            innermode, byte));

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that cannot
             grok partial registers anyway.  */

          if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && known_le (outersize, innersize))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      poly_uint64 final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
        part_mode = GET_MODE_INNER (GET_MODE (op));
      poly_uint64 part_size = GET_MODE_SIZE (part_mode);
      if (known_lt (byte, part_size))
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else if (known_ge (byte, part_size))
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }
      else
        return NULL_RTX;

      if (maybe_gt (final_offset + outersize, part_size))
        return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
        part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* Simplify
        (subreg (vec_merge (X)
                           (vector)
                           (const_int ((1 << N) | M)))
                (N * sizeof (outermode)))
     to
        (subreg (X) (N * sizeof (outermode)))
   */
  unsigned int idx;
  if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
      && idx < HOST_BITS_PER_WIDE_INT
      && GET_CODE (op) == VEC_MERGE
      && GET_MODE_INNER (innermode) == outermode
      && CONST_INT_P (XEXP (op, 2))
      && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
    return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
        return CONST0_RTX (outermode);
    }

  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
    {
      /* Handle polynomial integers.  The upper bits of a paradoxical
         subreg are undefined, so this is safe regardless of whether
         we're truncating or extending.  */
      if (CONST_POLY_INT_P (op))
        {
          poly_wide_int val
            = poly_wide_int::from (const_poly_int_value (op),
                                   GET_MODE_PRECISION (int_outermode),
                                   SIGNED);
          return immed_wide_int_const (val, int_outermode);
        }

      if (GET_MODE_PRECISION (int_outermode)
          < GET_MODE_PRECISION (int_innermode))
        {
          rtx tem = simplify_truncation (int_outermode, op, int_innermode);
          if (tem)
            return tem;
        }
    }

  /* If OP is a vector comparison and the subreg is not changing the
     number of elements or the size of the elements, change the result
     of the comparison to the new mode.  */
  if (COMPARISON_P (op)
      && VECTOR_MODE_P (outermode)
      && VECTOR_MODE_P (innermode)
      && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
      && known_eq (GET_MODE_UNIT_SIZE (outermode),
                   GET_MODE_UNIT_SIZE (innermode)))
    return simplify_gen_relational (GET_CODE (op), outermode, innermode,
                                    XEXP (op, 0), XEXP (op, 1));

  return NULL_RTX;
}
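/* Example of the ZERO_EXTEND rule in simplify_subreg above (the modes are
   purely illustrative): on a little-endian target,
   (subreg:SI (zero_extend:DI (reg:HI x)) 4) reads bits [32, 63] of the zero
   extension, all of which lie above the 16 bits supplied by X, so the whole
   subreg folds to (const_int 0).  */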
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
                     machine_mode innermode, poly_uint64 byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
                machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
                              subreg_lowpart_offset (outer_mode, inner_mode));
}
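/* For example, lowpart_subreg (SImode, x, DImode) is equivalent to
   simplify_gen_subreg (SImode, x, DImode, 0) on a little-endian target and
   to a byte offset of 4 on a big-endian one, since that is where
   subreg_lowpart_offset places the least significant SImode part.  */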
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}

#if CHECKING_P
namespace selftest {

/* Make a unique pseudo REG of mode MODE for use by selftests.  */

static rtx
make_test_reg (machine_mode mode)
{
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}
/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */
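/* For instance, with MODE == V4SImode (illustrative) the first assertion
   below checks that applying NOT to (vec_duplicate:V4SI (not:SI (reg:SI x)))
   simplifies to (vec_duplicate:V4SI (reg:SI x)); the later checks exercise
   the analogous NEG, PLUS/MINUS-with-zero and VEC_SELECT folds.  */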
static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  poly_uint64 nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
                     simplify_unary_operation (NOT, mode,
                                               duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
                     simplify_unary_operation (NEG, mode,
                                               duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
                     simplify_binary_operation (PLUS, mode, duplicate,
                                                CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
                     simplify_binary_operation (MINUS, mode, duplicate,
                                                CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
                         simplify_binary_operation (MINUS, mode, duplicate,
                                                    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
                     simplify_binary_operation (VEC_SELECT, inner_mode,
                                                duplicate, zero_par));

  unsigned HOST_WIDE_INT const_nunits;
  if (nunits.is_constant (&const_nunits))
    {
      /* And again with the final element.  */
      rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
      rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
      ASSERT_RTX_PTR_EQ (scalar_reg,
                         simplify_binary_operation (VEC_SELECT, inner_mode,
                                                    duplicate, last_par));

      /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE.  */
      rtx vector_reg = make_test_reg (mode);
      for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
        {
          if (i >= HOST_BITS_PER_WIDE_INT)
            break;
          rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
          rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
          poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
          ASSERT_RTX_EQ (scalar_reg,
                         simplify_gen_subreg (inner_mode, vm,
                                              mode, offset));
        }
    }

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
                 simplify_gen_subreg (inner_mode, duplicate,
                                      mode, offset));

  machine_mode narrower_mode;
  if (maybe_ne (nunits, 2U)
      && multiple_p (nunits, 2)
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_DUPLICATE of a vector.  */
      rtx_vector_builder nbuilder (narrower_mode, 2, 1);
      nbuilder.quick_push (const0_rtx);
      nbuilder.quick_push (const1_rtx);
      rtx_vector_builder builder (mode, 2, 1);
      builder.quick_push (const0_rtx);
      builder.quick_push (const1_rtx);
      ASSERT_RTX_EQ (builder.build (),
                     simplify_unary_operation (VEC_DUPLICATE, mode,
                                               nbuilder.build (),
                                               narrower_mode));

      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
        = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
        = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
                     simplify_binary_operation (VEC_SELECT, narrower_mode,
                                                duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
                     simplify_gen_subreg (narrower_mode, duplicate,
                                          mode, offset));
    }
}
/* Test vector simplifications involving VEC_SERIES in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */
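/* A (vec_series base step) represents the vector
   { base, base + step, base + 2 * step, ... }, so, for example, negating
   (vec_series 0 (neg x)) must give (vec_series 0 x), and adding
   (vec_duplicate x) to a constant { 0, 1, 2, ... } series must give
   (vec_series x 1); those are among the identities checked below.  */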
static void
test_vector_ops_series (machine_mode mode, rtx scalar_reg)
{
  /* Test unary cases with VEC_SERIES arguments.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
  rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
  rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
  rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
  rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
  rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
  rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
                                         neg_scalar_reg);
  ASSERT_RTX_EQ (series_0_r,
                 simplify_unary_operation (NEG, mode, series_0_nr, mode));
  ASSERT_RTX_EQ (series_r_m1,
                 simplify_unary_operation (NEG, mode, series_nr_1, mode));
  ASSERT_RTX_EQ (series_r_r,
                 simplify_unary_operation (NEG, mode, series_nr_nr, mode));

  /* Test that a VEC_SERIES with a zero step is simplified away.  */
  ASSERT_RTX_EQ (duplicate,
                 simplify_binary_operation (VEC_SERIES, mode,
                                            scalar_reg, const0_rtx));

  /* Test PLUS and MINUS with VEC_SERIES.  */
  rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
  rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
  rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
  ASSERT_RTX_EQ (series_r_r,
                 simplify_binary_operation (PLUS, mode, series_0_r,
                                            duplicate));
  ASSERT_RTX_EQ (series_r_1,
                 simplify_binary_operation (PLUS, mode, duplicate,
                                            series_0_1));
  ASSERT_RTX_EQ (series_r_m1,
                 simplify_binary_operation (PLUS, mode, duplicate,
                                            series_0_m1));
  ASSERT_RTX_EQ (series_0_r,
                 simplify_binary_operation (MINUS, mode, series_r_r,
                                            duplicate));
  ASSERT_RTX_EQ (series_r_m1,
                 simplify_binary_operation (MINUS, mode, duplicate,
                                            series_0_1));
  ASSERT_RTX_EQ (series_r_1,
                 simplify_binary_operation (MINUS, mode, duplicate,
                                            series_0_m1));
  ASSERT_RTX_EQ (series_0_m1,
                 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
                                            constm1_rtx));

  /* Test NEG on constant vector series.  */
  ASSERT_RTX_EQ (series_0_m1,
                 simplify_unary_operation (NEG, mode, series_0_1, mode));
  ASSERT_RTX_EQ (series_0_1,
                 simplify_unary_operation (NEG, mode, series_0_m1, mode));

  /* Test PLUS and MINUS on constant vector series.  */
  rtx scalar2 = gen_int_mode (2, inner_mode);
  rtx scalar3 = gen_int_mode (3, inner_mode);
  rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
  rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
  rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
  ASSERT_RTX_EQ (series_1_1,
                 simplify_binary_operation (PLUS, mode, series_0_1,
                                            CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_0_m1,
                 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
                                            series_0_m1));
  ASSERT_RTX_EQ (series_1_3,
                 simplify_binary_operation (PLUS, mode, series_1_1,
                                            series_0_2));
  ASSERT_RTX_EQ (series_0_1,
                 simplify_binary_operation (MINUS, mode, series_1_1,
                                            CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_1_1,
                 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
                                            series_0_m1));
  ASSERT_RTX_EQ (series_1_1,
                 simplify_binary_operation (MINUS, mode, series_1_3,
                                            series_0_2));

  /* Test MULT between constant vectors.  */
  rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
  rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
  rtx scalar9 = gen_int_mode (9, inner_mode);
  rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
  ASSERT_RTX_EQ (series_0_2,
                 simplify_binary_operation (MULT, mode, series_0_1, vec2));
  ASSERT_RTX_EQ (series_3_9,
                 simplify_binary_operation (MULT, mode, vec3, series_1_3));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
                                             series_0_1));

  /* Test ASHIFT between constant vectors.  */
  ASSERT_RTX_EQ (series_0_2,
                 simplify_binary_operation (ASHIFT, mode, series_0_1,
                                            CONST1_RTX (mode)));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
                                             series_0_1));
}
/* Verify simplify_merge_mask works correctly.  */
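/* Roughly, simplify_merge_mask (X, MASK, OP) assumes X is operand OP of a
   (vec_merge ... MASK) and rewrites any VEC_MERGE inside X that uses the
   same MASK to its operand OP, returning NULL_RTX when nothing can be
   simplified; the assertions below check exactly that behaviour.  */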
static void
test_vec_merge (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx op2 = make_test_reg (mode);
  rtx op3 = make_test_reg (mode);
  rtx op4 = make_test_reg (mode);
  rtx op5 = make_test_reg (mode);
  rtx mask1 = make_test_reg (SImode);
  rtx mask2 = make_test_reg (SImode);
  rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
  rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
  rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);

  /* Simple vec_merge.  */
  ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
  ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));

  /* Nested vec_merge.
     It's tempting to make this simplify right down to opN, but we don't
     because all the simplify_* functions assume that the operands have
     already been simplified.  */
  rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
  ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
  ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));

  /* Intermediate unary op.  */
  rtx unop = gen_rtx_NOT (mode, vm1);
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
                 simplify_merge_mask (unop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
                 simplify_merge_mask (unop, mask1, 1));

  /* Intermediate binary op.  */
  rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
                 simplify_merge_mask (binop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
                 simplify_merge_mask (binop, mask1, 1));

  /* Intermediate ternary op.  */
  rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
                 simplify_merge_mask (tenop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
                 simplify_merge_mask (tenop, mask1, 1));

  /* Side effects.  */
  rtx badop0 = gen_rtx_PRE_INC (mode, op0);
  rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
  ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));

  /* Called indirectly.  */
  ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
                 simplify_rtx (nvm));
}
/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
        {
          rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
          test_vector_ops_duplicate (mode, scalar_reg);
          if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
              && maybe_gt (GET_MODE_NUNITS (mode), 2))
            test_vector_ops_series (mode, scalar_reg);
          test_vec_merge (mode);
        }
    }
}
template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};

/* Test various CONST_POLY_INT properties.  */
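/* In these tests poly_int64 (A, B) stands for the value A + B * X, where X
   is the runtime length parameter, so e.g. x8 below is 30 + 24x and the
   first assertion checks that negating it yields -30 - 24x (x9).  */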
template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller arithmetic
     testing is done directly on poly_ints.  */
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}
/* Run all of the selftests within this file.  */

void
simplify_rtx_c_tests ()
{
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}

} // namespace selftest

#endif /* CHECKING_P */