/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "selftest-rtl.h"
#include "rtx-vector-builder.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)

static bool plus_minus_operand_p (const_rtx);
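/* For example, HWI_SIGN_EXTEND (HOST_WIDE_INT_M1U) evaluates to
   HOST_WIDE_INT_M1, filling the high half of a (low, high) pair whose low
   word is negative, while HWI_SIGN_EXTEND (1) evaluates to
   HOST_WIDE_INT_0.  */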
/* Negate I, which satisfies poly_int_rtx_p.  MODE is the mode of I.  */

static rtx
neg_poly_int_rtx (machine_mode mode, const_rtx i)
{
  return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
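/* For example, with a 64-bit HOST_WIDE_INT, mode_signbit_p (SImode,
   gen_int_mode (HOST_WIDE_INT_1U << 31, SImode)) is true; any other
   single-bit SImode constant is rejected.  */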
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
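/* For example, val_signbit_p (QImode, 0x80) and
   val_signbit_known_set_p (QImode, 0xff) are both true, while
   val_signbit_known_clear_p (QImode, 0x7f) is true as well.  */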
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
                                       rtx op0, rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
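/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds to X
   itself; only combinations that do not simplify produce a new
   (plus:SI ...) rtx with canonical operand order.  */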
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
        return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      poly_int64 offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep);
            if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
                || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
                || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
              decl = NULL;
            else
              offset += bytepos + toffset_val;
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && VAR_P (decl)
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);
              poly_int64 n_offset, o_offset;

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              n = strip_offset (n, &n_offset);
              o = strip_offset (o, &o_offset);
              if (!(known_eq (o_offset, n_offset + offset)
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && known_eq (offset, 0))
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
                                      machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
                                        machine_mode op0_mode,
                                        rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
                                         op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
                                           machine_mode cmp_mode,
                                           rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
                                            op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
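/* For example, simplify_gen_relational (EQ, SImode, SImode, x, x) folds a
   comparison of a register with itself to a constant "true" value (when X
   has no side effects) instead of emitting a fresh (eq ...) rtx.  */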
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
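/* For example, replacing (reg:SI A) with (const_int 0) in
   (plus:SI (reg:SI A) (reg:SI B)) via simplify_replace_rtx yields just
   (reg:SI B), because the rebuilt PLUS is re-simplified on the way out.  */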
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
rtx
simplify_context::simplify_truncation (machine_mode mode, rtx op,
                                       machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
          || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
          /* If doing this transform works for an X with all bits set,
             it works for any X.  */
          && ((GET_MODE_MASK (mode) >> shift) & mask)
             == ((GET_MODE_MASK (op_mode) >> shift) & mask)
          && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
          && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
        {
          mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
          return simplify_gen_binary (AND, mode, op0, mask_op);
        }
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            {
              pos -= op_precision - precision;
              return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                           XEXP (op, 1), GEN_INT (pos));
            }
        }
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                         XEXP (op, 1), XEXP (op, 2));
        }
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* Simplifications of (truncate:A (subreg:B X 0)).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && subreg_lowpart_p (op))
    {
      /* (truncate:A (subreg:B (truncate:C X) 0)) is (truncate:A X).  */
      if (GET_CODE (SUBREG_REG (op)) == TRUNCATE)
        {
          rtx inner = XEXP (SUBREG_REG (op), 0);
          if (GET_MODE_PRECISION (int_mode)
              <= GET_MODE_PRECISION (subreg_mode))
            return simplify_gen_unary (TRUNCATE, int_mode, inner,
                                       GET_MODE (inner));
          else
            /* If subreg above is paradoxical and C is narrower
               than A, return (subreg:A (truncate:C X) 0).  */
            return simplify_gen_subreg (int_mode, SUBREG_REG (op),
                                        subreg_mode, 0);
        }

      /* Simplifications of (truncate:A (subreg:B X:C 0)) with
         paradoxical subregs (B is wider than C).  */
      if (is_a <scalar_int_mode> (op_mode, &int_op_mode))
        {
          unsigned int int_op_prec = GET_MODE_PRECISION (int_op_mode);
          unsigned int subreg_prec = GET_MODE_PRECISION (subreg_mode);
          if (int_op_prec > subreg_prec)
            {
              if (int_mode == subreg_mode)
                return SUBREG_REG (op);
              if (GET_MODE_PRECISION (int_mode) < subreg_prec)
                return simplify_gen_unary (TRUNCATE, int_mode,
                                           SUBREG_REG (op), subreg_mode);
            }
          /* Simplification of (truncate:A (subreg:B X:C 0)) where
             A is narrower than B and B is narrower than C.  */
          else if (int_op_prec < subreg_prec
                   && GET_MODE_PRECISION (int_mode) < int_op_prec)
            return simplify_gen_unary (TRUNCATE, int_mode,
                                       SUBREG_REG (op), subreg_mode);
        }
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
                                            rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
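/* For example, (float:SF (reg:HI x)) is always an exact conversion, since a
   16-bit integer fits in SFmode's 24-bit significand; a full 32-bit operand
   is exact only when enough high bits are sign/zero copies or enough low
   bits are known to be zero.  */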
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
rtx
simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
                                              rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
         modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
         and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && is_a <scalar_int_mode> (mode, &int_mode)
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
        return simplify_gen_relational (GE, int_mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If comparison is not reversible use
         x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
              || (GET_CODE (false_rtx) == NEG
                  && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
        {
          int_mode = as_a <scalar_int_mode> (mode);
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          gen_int_shift_amount (inner,
                                                                isize - 1));
              if (int_mode == inner)
                return temp;
              if (GET_MODE_PRECISION (int_mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          gen_int_shift_amount (inner,
                                                                isize - 1));
              if (int_mode == inner)
                return temp;
              if (GET_MODE_PRECISION (int_mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
            }
        }

      if (vec_series_p (op, &base, &step))
        {
          /* Only create a new series if we can simplify both parts.  In other
             cases this isn't really a simplification, and it's not necessarily
             a win to replace a vector operation with a scalar operation.  */
          scalar_mode inner_mode = GET_MODE_INNER (mode);
          base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
          if (base)
            {
              step = simplify_unary_operation (NEG, inner_mode,
                                               step, inner_mode);
              if (step)
                return gen_vec_series (mode, base, step);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (known_eq (GET_MODE_NUNITS (mode), 1)
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
          && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Check for useless truncation.  */
      if (GET_MODE (op) == mode)
        return op;
      break;
    case FLOAT_TRUNCATE:
      /* Check for useless truncation.  */
      if (GET_MODE (op) == mode)
        return op;

      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_UNIT_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
        return op;

      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
         */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>)  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
          && (num_sign_bit_copies (op, int_mode)
              == GET_MODE_PRECISION (int_mode)))
        return gen_rtx_NEG (int_mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>)  */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        case PARITY:
          /* (parity (parity x)) -> parity (x).  */
          return op;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
        return op;

      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = (GET_MODE_UNIT_PRECISION (lmode)
                        - INTVAL (XEXP (lhs, 1)));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += (GET_MODE_UNIT_PRECISION (rmode)
                         - INTVAL (XEXP (rhs, 1)));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematiclly
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op))
        {
          rtx subreg = SUBREG_REG (op);
          machine_mode subreg_mode = GET_MODE (subreg);
          if (!paradoxical_subreg_p (mode, subreg_mode))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
              if (temp)
                {
                  /* Preserve SUBREG_PROMOTED_VAR_P.  */
                  if (partial_subreg_p (temp))
                    {
                      SUBREG_PROMOTED_VAR_P (temp) = 1;
                      SUBREG_PROMOTED_SET (temp, 1);
                    }
                  return temp;
                }
            }
          else
            /* Sign-extending a sign-extended subreg.  */
            return simplify_gen_unary (SIGN_EXTEND, mode,
                                       subreg, subreg_mode);
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_UNIT_PRECISION (mode)
                      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          gcc_assert (GET_MODE_PRECISION (int_mode)
                      > GET_MODE_PRECISION (op_mode));
          if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           int_mode, inner, tmode);
            }
        }

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (op, 1) != const0_rtx)
        return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

      /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
         I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
         (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
         (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
         O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
         wider than O.  */
      if (GET_CODE (op) == TRUNCATE
          && GET_CODE (XEXP (op, 0)) == LSHIFTRT
          && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
        {
          scalar_int_mode m_mode, n_mode, o_mode;
          rtx old_shift = XEXP (op, 0);
          if (is_a <scalar_int_mode> (mode, &m_mode)
              && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
              && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
              && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
                 == INTVAL (XEXP (old_shift, 1)))
            {
              rtx new_shift = simplify_gen_binary (ASHIFTRT,
                                                   GET_MODE (old_shift),
                                                   XEXP (old_shift, 0),
                                                   XEXP (old_shift, 1));
              if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
                return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
                                           GET_MODE (new_shift));
              if (mode != GET_MODE (new_shift))
                return simplify_gen_unary (TRUNCATE, mode, new_shift,
                                           GET_MODE (new_shift));
              return new_shift;
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    case ZERO_EXTEND:
      /* Check for useless extension.  */
      if (GET_MODE (op) == mode)
        return op;

      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op))
        {
          rtx subreg = SUBREG_REG (op);
          machine_mode subreg_mode = GET_MODE (subreg);
          if (!paradoxical_subreg_p (mode, subreg_mode))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
              if (temp)
                {
                  /* Preserve SUBREG_PROMOTED_VAR_P.  */
                  if (partial_subreg_p (temp))
                    {
                      SUBREG_PROMOTED_VAR_P (temp) = 1;
                      SUBREG_PROMOTED_SET (temp, 0);
                    }
                  return temp;
                }
            }
          else
            /* Zero-extending a zero-extended subreg.  */
            return simplify_gen_unary (ZERO_EXTEND, mode,
                                       subreg, subreg_mode);
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = (GET_MODE_UNIT_PRECISION (lmode)
                        - INTVAL (XEXP (lhs, 1)));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += (GET_MODE_UNIT_PRECISION (rmode)
                         - INTVAL (XEXP (rhs, 1)));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematiclly
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, int_mode,
                                           inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
          && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), op0_mode)
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
                                     op0_mode);
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    default:
      break;
    }
  if (VECTOR_MODE_P (mode)
      && vec_duplicate_p (op, &elt)
      && code != VEC_DUPLICATE)
    {
      if (code == SIGN_EXTEND || code == ZERO_EXTEND)
        /* Enforce a canonical order of VEC_DUPLICATE wrt other unary
           operations by promoting VEC_DUPLICATE to the root of the expression
           (as far as possible).  */
        temp = simplify_gen_unary (code, GET_MODE_INNER (mode),
                                   elt, GET_MODE_INNER (GET_MODE (op)));
      else
        /* Try applying the operator to ELT and see if that simplifies.
           We can duplicate the result if so.

           The reason we traditionally haven't used simplify_gen_unary
           for these codes is that it didn't necessarily seem to be a
           win to convert things like:

             (neg:V (vec_duplicate:V (reg:S R)))

           to:

             (vec_duplicate:V (neg:S (reg:S R)))

           The first might be done entirely in vector registers while the
           second might need a move between register files.

           However, there also cases where promoting the vec_duplicate is
           more efficient, and there is definite value in having a canonical
           form when matching instruction patterns.  We should consider
           extending the simplify_gen_unary code above to more cases.  */
        temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                         elt, GET_MODE_INNER (GET_MODE (op)));
      if (temp)
        return gen_vec_duplicate (mode, temp);
    }

  return 0;
}
1834 /* Try to compute the value of a unary operation CODE whose output mode is to
1835 be MODE with input operand OP whose mode was originally OP_MODE.
1836 Return zero if the value cannot be computed. */
1838 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1839 rtx op
, machine_mode op_mode
)
1841 scalar_int_mode result_mode
;
1843 if (code
== VEC_DUPLICATE
)
1845 gcc_assert (VECTOR_MODE_P (mode
));
1846 if (GET_MODE (op
) != VOIDmode
)
1848 if (!VECTOR_MODE_P (GET_MODE (op
)))
1849 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1851 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1854 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
))
1855 return gen_const_vec_duplicate (mode
, op
);
1856 if (GET_CODE (op
) == CONST_VECTOR
1857 && (CONST_VECTOR_DUPLICATE_P (op
)
1858 || CONST_VECTOR_NUNITS (op
).is_constant ()))
1860 unsigned int npatterns
= (CONST_VECTOR_DUPLICATE_P (op
)
1861 ? CONST_VECTOR_NPATTERNS (op
)
1862 : CONST_VECTOR_NUNITS (op
).to_constant ());
1863 gcc_assert (multiple_p (GET_MODE_NUNITS (mode
), npatterns
));
1864 rtx_vector_builder
builder (mode
, npatterns
, 1);
1865 for (unsigned i
= 0; i
< npatterns
; i
++)
1866 builder
.quick_push (CONST_VECTOR_ELT (op
, i
));
1867 return builder
.build ();
1871 if (VECTOR_MODE_P (mode
)
1872 && GET_CODE (op
) == CONST_VECTOR
1873 && known_eq (GET_MODE_NUNITS (mode
), CONST_VECTOR_NUNITS (op
)))
1875 gcc_assert (GET_MODE (op
) == op_mode
);
1877 rtx_vector_builder builder
;
1878 if (!builder
.new_unary_operation (mode
, op
, false))
1881 unsigned int count
= builder
.encoded_nelts ();
1882 for (unsigned int i
= 0; i
< count
; i
++)
1884 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1885 CONST_VECTOR_ELT (op
, i
),
1886 GET_MODE_INNER (op_mode
));
1887 if (!x
|| !valid_for_const_vector_p (mode
, x
))
1889 builder
.quick_push (x
);
1891 return builder
.build ();
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 the operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 the operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
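/* Worked example (values picked for illustration): with OP_MODE == SImode,
   (float:DF (const_int -1)) reads the constant as signed and folds to -1.0,
   while (unsigned_float:DF (const_int -1)) reads the same bits as 0xffffffff
   and folds to 4294967295.0.  */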
  if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
	return 0;

      wide_int result;
      scalar_int_mode imode = (op_mode == VOIDmode
			       ? result_mode
			       : as_a <scalar_int_mode> (op_mode));
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert happens,
	 you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), result_mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), result_mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), result_mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), result_mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	case SS_NEG:
	  if (wi::only_sign_bit_p (op0))
	    result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
	  else
	    result = wi::neg (op0);
	  break;

	case SS_ABS:
	  if (wi::only_sign_bit_p (op0))
	    result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
	  else
	    result = wi::abs (op0);
	  break;

	case SQRT:
	default:
	  return 0;
	}

      return immed_wide_int_const (result, result_mode);
    }
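/* A few concrete instances of the wide-int folds above, chosen for
   illustration: (popcount:SI (const_int 0xff)) -> (const_int 8),
   (bswap:SI (const_int 0x12345678)) -> (const_int 0x78563412), and
   (ctz:SI (const_int 8)) -> (const_int 3); CTZ/CLZ of zero fold only when
   the target's *_DEFINED_VALUE_AT_ZERO macro supplies a value.  */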
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
	return 0;

      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }
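/* Example of the saturating behaviour implemented above (illustrative
   values): (fix:QI (const_double 300.0)) folds to the QImode signed maximum
   127, and (unsigned_fix:QI (const_double -3.0)) folds to 0 because negative
   inputs, like NaNs, return const0_rtx.  */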
  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
	{
	case NEG:
	  result = -const_poly_int_value (op);
	  break;

	case NOT:
	  result = ~const_poly_int_value (op);
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

rtx
simplify_context::simplify_byte_swapping_operation (rtx_code code,
						    machine_mode mode,
						    rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
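/* Illustrative instance of the first rule above, for SImode:
   (and (bswap:SI X) (const_int 0xff)) becomes
   (bswap:SI (and X (const_int 0xff000000))), since byte-swapping the
   constant commutes with the bitwise AND.  */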
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

rtx
simplify_context::simplify_associative_operation (rtx_code code,
						  machine_mode mode,
						  rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
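/* For example (operands chosen arbitrarily), the "(x op c) op y" rule above
   turns (plus (plus X (const_int 4)) Y) into (plus (plus X Y) (const_int 4)),
   keeping the constant outermost so that later folds can combine it with
   other constants.  */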
/* Return a mask describing the COMPARISON.  */
static int
comparison_to_mask (enum rtx_code comparison)
{
  switch (comparison)
    {
    case LT:
      return 8;
    case GT:
      return 4;
    case EQ:
      return 2;
    case UNORDERED:
      return 1;

    case LTGT:
      return 12;
    case LE:
      return 10;
    case GE:
      return 6;
    case UNLT:
      return 9;
    case UNGT:
      return 5;
    case UNEQ:
      return 3;

    case ORDERED:
      return 14;
    case NE:
      return 13;
    case UNLE:
      return 11;
    case UNGE:
      return 7;

    default:
      gcc_unreachable ();
    }
}

/* Return a comparison corresponding to the MASK.  */
static enum rtx_code
mask_to_comparison (int mask)
{
  switch (mask)
    {
    case 8:
      return LT;
    case 4:
      return GT;
    case 2:
      return EQ;
    case 1:
      return UNORDERED;

    case 12:
      return LTGT;
    case 10:
      return LE;
    case 6:
      return GE;
    case 9:
      return UNLT;
    case 5:
      return UNGT;
    case 3:
      return UNEQ;

    case 14:
      return ORDERED;
    case 13:
      return NE;
    case 11:
      return UNLE;
    case 7:
      return UNGE;

    default:
      gcc_unreachable ();
    }
}

/* Return true if CODE is valid for comparisons of mode MODE, false
   otherwise.

   It is always safe to return false, even if the code was valid for the
   given mode as that will merely suppress optimizations.  */

static bool
comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
{
  switch (code)
    {
      /* These are valid for integral, floating and vector modes.  */
    case NE:
    case EQ:
    case GE:
    case GT:
    case LE:
    case LT:
      return (INTEGRAL_MODE_P (mode)
	      || FLOAT_MODE_P (mode)
	      || VECTOR_MODE_P (mode));

      /* These are valid for floating point modes.  */
    case LTGT:
    case UNORDERED:
    case ORDERED:
    case UNEQ:
    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      return FLOAT_MODE_P (mode);

      /* These are filtered out in simplify_logical_operation, but
	 we check for them too as a matter of safety.  They are valid
	 for integral and vector modes.  */
    case GEU:
    case GTU:
    case LEU:
    case LTU:
      return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);

    default:
      gcc_unreachable ();
    }
}
/* Canonicalize RES, a scalar const0_rtx/const_true_rtx to the right
   false/true value of comparison with MODE where comparison operands
   have CMP_MODE.  */

static rtx
relational_result (machine_mode mode, machine_mode cmp_mode, rtx res)
{
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      if (res == const0_rtx)
	return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
      REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode);
      return const_double_from_real_value (val, mode);
#else
      return NULL_RTX;
#endif
    }
  if (VECTOR_MODE_P (mode))
    {
      if (res == const0_rtx)
	return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
      rtx val = VECTOR_STORE_FLAG_VALUE (mode);
      if (val == NULL_RTX)
	return NULL_RTX;
      if (val == const1_rtx)
	return CONST1_RTX (mode);

      return gen_const_vec_duplicate (mode, val);
#else
      return NULL_RTX;
#endif
    }
  /* For vector comparison with scalar int result, it is unknown
     if the target means here a comparison into an integral bitmask,
     or comparison where all comparisons true mean const_true_rtx
     whole result, or where any comparisons true mean const_true_rtx
     whole result.  For const0_rtx all the cases are the same.  */
  if (VECTOR_MODE_P (cmp_mode)
      && SCALAR_INT_MODE_P (mode)
      && res == const_true_rtx)
    return NULL_RTX;

  return res;
}
/* Simplify a logical operation CODE with result mode MODE, operating on OP0
   and OP1, which should be both relational operations.  Return 0 if no such
   simplification is possible.  */
rtx
simplify_context::simplify_logical_relational_operation (rtx_code code,
							  machine_mode mode,
							  rtx op0, rtx op1)
{
  /* We only handle IOR of two relational operations.  */
  if (code != IOR)
    return 0;

  if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
    return 0;

  if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	&& rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
    return 0;

  enum rtx_code code0 = GET_CODE (op0);
  enum rtx_code code1 = GET_CODE (op1);

  /* We don't handle unsigned comparisons currently.  */
  if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
    return 0;
  if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
    return 0;

  int mask0 = comparison_to_mask (code0);
  int mask1 = comparison_to_mask (code1);

  int mask = mask0 | mask1;

  if (mask == 15)
    return relational_result (mode, GET_MODE (op0), const_true_rtx);

  code = mask_to_comparison (mask);

  /* Many comparison codes are only valid for certain mode classes.  */
  if (!comparison_code_valid_for_mode (code, mode))
    return 0;

  op0 = XEXP (op1, 0);
  op1 = XEXP (op1, 1);

  return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
}
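/* Example: (ior (lt X Y) (eq X Y)) over a common operand pair combines the
   two comparison masks and comes back from mask_to_comparison as (le X Y);
   if the union of the masks covers every outcome, the whole expression
   folds to the canonical true value via relational_result.  */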
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
					     rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

rtx
simplify_context::simplify_binary_operation_series (rtx_code code,
						    machine_mode mode,
						    rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
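/* Illustrative fold: adding (vec_duplicate:V4SI (const_int 10)) to
   (vec_series:V4SI (const_int 0) (const_int 1)) gives
   (vec_series:V4SI (const_int 10) (const_int 1)), because both the bases
   and the steps simplify to constants.  */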
/* Subroutine of simplify_binary_operation_1.  Un-distribute a binary
   operation CODE with result mode MODE, operating on OP0 and OP1.
   e.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
   Returns NULL_RTX if no simplification is possible.  */

rtx
simplify_context::simplify_distributive_operation (rtx_code code,
						   machine_mode mode,
						   rtx op0, rtx op1)
{
  enum rtx_code op = GET_CODE (op0);
  gcc_assert (GET_CODE (op1) == op);

  if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
      && ! side_effects_p (XEXP (op0, 1)))
    return simplify_gen_binary (op, mode,
				simplify_gen_binary (code, mode,
						     XEXP (op0, 0),
						     XEXP (op1, 0)),
				XEXP (op0, 1));

  if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
    {
      if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && ! side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (op, mode,
				    XEXP (op0, 0),
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 1),
							 XEXP (op1, 1)));
      if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
	  && ! side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (op, mode,
				    XEXP (op0, 0),
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 1),
							 XEXP (op1, 0)));
      if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return simplify_gen_binary (op, mode,
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 0),
							 XEXP (op1, 1)),
				    XEXP (op0, 1));
    }

  return NULL_RTX;
}
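/* Example of un-distribution (operands arbitrary): with a shared shift
   count, (ior (lshiftrt A (const_int 3)) (lshiftrt B (const_int 3)))
   becomes (lshiftrt (ior A B) (const_int 3)); the shared operand must be
   free of side effects, as checked above.  */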
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

rtx
simplify_context::simplify_binary_operation_1 (rtx_code code,
					       machine_mode mode,
					       rtx op0, rtx op1,
					       rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;
  poly_int64 offset;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, offset);
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && poly_int_rtx_p (op0, &offset))
	return plus_constant (mode, op1, offset);

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);

	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
	      return (set_src_cost (tem, int_mode, speed)
		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
	    }

	  /* Optimize (X - 1) * Y + Y to X * Y.  */
	  lhs = op0;
	  rhs = op1;
	  if (GET_CODE (op0) == MULT)
	    {
	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
		    && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
		   || (GET_CODE (XEXP (op0, 0)) == MINUS
		       && XEXP (XEXP (op0, 0), 1) == const1_rtx))
		  && rtx_equal_p (XEXP (op0, 1), op1))
		lhs = XEXP (XEXP (op0, 0), 0);
	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
			 && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
			|| (GET_CODE (XEXP (op0, 1)) == MINUS
			    && XEXP (XEXP (op0, 1), 1) == const1_rtx))
		       && rtx_equal_p (XEXP (op0, 0), op1))
		lhs = XEXP (XEXP (op0, 1), 0);
	    }
	  else if (GET_CODE (op1) == MULT)
	    {
	      if (((GET_CODE (XEXP (op1, 0)) == PLUS
		    && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
		   || (GET_CODE (XEXP (op1, 0)) == MINUS
		       && XEXP (XEXP (op1, 0), 1) == const1_rtx))
		  && rtx_equal_p (XEXP (op1, 1), op0))
		rhs = XEXP (XEXP (op1, 0), 0);
	      else if (((GET_CODE (XEXP (op1, 1)) == PLUS
			 && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
			|| (GET_CODE (XEXP (op1, 1)) == MINUS
			    && XEXP (XEXP (op1, 1), 1) == const1_rtx))
		       && rtx_equal_p (XEXP (op1, 0), op0))
		rhs = XEXP (XEXP (op1, 1), 0);
	    }
	  if (lhs != op0 || rhs != op1)
	    return simplify_gen_binary (MULT, int_mode, lhs, rhs);
	}
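      /* Sanity check of the (X - 1) * Y + Y identity above with arbitrary
	 values: for X = 5 and Y = 7, (5 - 1) * 7 + 7 = 35 = 5 * 7, so
	 rewriting the PLUS as a single MULT loses nothing.  */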
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  tem = simplify_binary_operation_series (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

	  if (REG_P (xop00) && REG_P (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE (xop00) == mode
	      && GET_MODE (xop10) == mode
	      && GET_MODE_CLASS (mode) == MODE_CC)
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
	 constants, in which case not retaining additions and
	 subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
	  && !contains_symbolic_reference_p (op1))
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signalling NaNs,
	 or has signed zeros and supports rounding towards -infinity.
	 In such a case, 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && !HONOR_SNANS (mode)
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (int_mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);

	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
	      return (set_src_cost (tem, int_mode, speed)
		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
	    }
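	  /* Numeric illustration: X * 3 - X has coeff0 = 3 and
	     negcoeff1 = -1, so the candidate replacement is X * 2; it is
	     only used when set_src_cost says it is no more expensive than
	     the original MINUS.  */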
	  /* Optimize (X + 1) * Y - Y to X * Y.  */
	  lhs = op0;
	  if (GET_CODE (op0) == MULT)
	    {
	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
		    && XEXP (XEXP (op0, 0), 1) == const1_rtx)
		   || (GET_CODE (XEXP (op0, 0)) == MINUS
		       && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
		  && rtx_equal_p (XEXP (op0, 1), op1))
		lhs = XEXP (XEXP (op0, 0), 0);
	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
			 && XEXP (XEXP (op0, 1), 1) == const1_rtx)
			|| (GET_CODE (XEXP (op0, 1)) == MINUS
			    && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
		       && rtx_equal_p (XEXP (op0, 0), op1))
		lhs = XEXP (XEXP (op0, 1), 0);
	    }
	  if (lhs != op0)
	    return simplify_gen_binary (MULT, int_mode, lhs, op1);
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));

      /* Don't let a relocatable value get a negative coeff.  */
      if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_poly_int_rtx (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  tem = simplify_binary_operation_series (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0,
					gen_int_shift_amount (mode, val));
	}
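      /* Example: X * 8 has exact_log2 (8) == 3 and becomes
	 (ashift X (const_int 3)); a non-power-of-two multiplier makes
	 wi::exact_log2 return a negative value and the multiplication is
	 left alone.  */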
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	  if (real_equal (d1, &dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && real_equal (d1, &dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
	}

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_UNIT_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
				     &inner_mode)
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
	  && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
	      + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}
      /* The following happens with bitfield merging.
	 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
      if (GET_CODE (op0) == AND
	  && GET_CODE (op1) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (XEXP (op1, 1))
	  && (INTVAL (XEXP (op0, 1))
	      == ~INTVAL (XEXP (op1, 1))))
	{
	  /* The IOR may be on both sides.  */
	  rtx top0 = NULL_RTX, top1 = NULL_RTX;
	  if (GET_CODE (XEXP (op1, 0)) == IOR)
	    top0 = op0, top1 = op1;
	  else if (GET_CODE (XEXP (op0, 0)) == IOR)
	    top0 = op1, top1 = op0;
	  if (top0 && top1)
	    {
	      /* X may be on either side of the inner IOR.  */
	      rtx tem = NULL_RTX;
	      if (rtx_equal_p (XEXP (top0, 0),
			       XEXP (XEXP (top1, 0), 0)))
		tem = XEXP (XEXP (top1, 0), 1);
	      else if (rtx_equal_p (XEXP (top0, 0),
				    XEXP (XEXP (top1, 0), 1)))
		tem = XEXP (XEXP (top1, 0), 0);
	      if (tem)
		return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
					    simplify_gen_binary
					      (AND, mode, tem, XEXP (top1, 1)));
	    }
	}

      /* Convert (ior (and A C) (and B C)) into (and (ior A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == IOR
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_logical_relational_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
	 out bits inverted twice and not set by C.  Similarly, given
	 (xor (and (xor A B) C) D), simplify without inverting C in
	 the xor operand: (xor (and A C) (B&C)^D).
	 */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (op1)
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
	{
	  enum rtx_code op = GET_CODE (op0);
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx d = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);
	  HOST_WIDE_INT dval = INTVAL (d);
	  HOST_WIDE_INT xcval;

	  if (op == IOR)
	    xcval = ~cval;
	  else
	    xcval = cval;

	  return simplify_gen_binary (XOR, mode,
				      simplify_gen_binary (op, mode, a, c),
				      gen_int_mode ((bval & xcval) ^ dval,
						    mode));
	}
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  /* Instead of computing ~A&C, we compute its negated value,
	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
						  GEN_INT (~cval));
	  if ((~cval & bval) == 0)
	    {
	      rtx na_c = NULL_RTX;
	      if (n_na_c)
		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
	      else
		{
		  /* If ~A does not simplify, don't bother: we don't
		     want to simplify 2 operations into 3, and if na_c
		     were to simplify with na, n_na_c would have
		     simplified as well.  */
		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
		  if (na)
		    na_c = simplify_gen_binary (AND, mode, na, c);
		}

	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval, mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (n_na_c == CONSTM1_RTX (mode))
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}
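      /* Numeric check of the (xor (and A B) C) rules above with arbitrary
	 constants B = 0x0f, C = 0xff: ~C & B is 0, so the first branch
	 applies and, when ~A&C simplifies, the result is
	 (ior (and (not A) 0xff) (const_int 0xf0)) with 0xf0 == ~B & C.  */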
      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
	 do (ior (and A ~C) (and B C)) which is a machine instruction on some
	 machines, and also has shorter instruction path length.  */
      if (GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && CONST_INT_P (XEXP (op0, 1))
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
	{
	  rtx a = trueop1;
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
	  rtx bc = simplify_gen_binary (AND, mode, b, c);
	  return simplify_gen_binary (IOR, mode, a_nc, bc);
	}
      /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C))  */
      else if (GET_CODE (op0) == AND
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (XEXP (op0, 1))
	       && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
	{
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = trueop1;
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
	  rtx ac = simplify_gen_binary (AND, mode, a, c);
	  return simplify_gen_binary (IOR, mode, ac, b_nc);
	}

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, int_mode)))
	return reversed;

      /* Convert (xor (and A C) (and B C)) into (and (xor A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X))) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 1)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 1)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == IOR
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
3850 /* 0/x is 0 (or x&0 if x has side-effects). */
3851 if (trueop0
== CONST0_RTX (mode
)
3852 && !cfun
->can_throw_non_call_exceptions
)
3854 if (side_effects_p (op1
))
3855 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3859 if (trueop1
== CONST1_RTX (mode
))
3861 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3865 /* Convert divide by power of two into shift. */
3866 if (CONST_INT_P (trueop1
)
3867 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3868 return simplify_gen_binary (LSHIFTRT
, mode
, op0
,
3869 gen_int_shift_amount (mode
, val
));
3873 /* Handle floating point and integers separately. */
3874 if (SCALAR_FLOAT_MODE_P (mode
))
3876 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3877 safe for modes with NaNs, since 0.0 / 0.0 will then be
3878 NaN rather than 0.0. Nor is it safe for modes with signed
3879 zeros, since dividing 0 by a negative number gives -0.0 */
3880 if (trueop0
== CONST0_RTX (mode
)
3881 && !HONOR_NANS (mode
)
3882 && !HONOR_SIGNED_ZEROS (mode
)
3883 && ! side_effects_p (op1
))
3886 if (trueop1
== CONST1_RTX (mode
)
3887 && !HONOR_SNANS (mode
))
3890 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3891 && trueop1
!= CONST0_RTX (mode
))
3893 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3896 if (real_equal (d1
, &dconstm1
)
3897 && !HONOR_SNANS (mode
))
3898 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3900 /* Change FP division by a constant into multiplication.
3901 Only do this with -freciprocal-math. */
3902 if (flag_reciprocal_math
3903 && !real_equal (d1
, &dconst0
))
3906 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3907 tem
= const_double_from_real_value (d
, mode
);
3908 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3912 else if (SCALAR_INT_MODE_P (mode
))
3914 /* 0/x is 0 (or x&0 if x has side-effects). */
3915 if (trueop0
== CONST0_RTX (mode
)
3916 && !cfun
->can_throw_non_call_exceptions
)
3918 if (side_effects_p (op1
))
3919 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3923 if (trueop1
== CONST1_RTX (mode
))
3925 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3930 if (trueop1
== constm1_rtx
)
3932 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3934 return simplify_gen_unary (NEG
, mode
, x
, mode
);
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (UINTVAL (trueop1) - 1,
						  mode));
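      /* Illustrative aside, not part of the simplifier itself: for
	 unsigned values, x % 2**N == x & (2**N - 1).  A hypothetical
	 standalone check:

	   static int
	   umod_is_mask (unsigned int x)
	   {
	     return (x % 8) == (x & 7);
	   }

	 returns nonzero for every x, matching the AND generated above.  */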
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
3981 if (trueop1
== CONST0_RTX (mode
))
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_UNIT_PRECISION (mode) - 1))
	{
	  int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
	  rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
	  return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				      mode, op0, new_amount_rtx);
	}
#endif
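      /* Illustrative aside, not part of the simplifier itself: rotating
	 by AMT one way equals rotating by PRECISION - AMT the other way.
	 For 32 bits, rotl (x, 24) == (x << 24) | (x >> 8) == rotr (x, 8),
	 so (rotate x 24) is canonicalized to (rotatert x 8) by the code
	 above.  */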
4001 if (trueop1
== CONST0_RTX (mode
))
4003 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
4005 /* Rotating ~0 always results in ~0. */
4006 if (CONST_INT_P (trueop0
)
4007 && HWI_COMPUTABLE_MODE_P (mode
)
4008 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
4009 && ! side_effects_p (op1
))
4015 scalar constants c1, c2
4016 size (M2) > size (M1)
4017 c1 == size (M2) - size (M1)
4019 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
4023 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
4025 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
4026 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4028 && CONST_INT_P (op1
)
4029 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
4030 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
4032 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
4033 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
4034 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
4035 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
4036 && subreg_lowpart_p (op0
))
4038 rtx tmp
= gen_int_shift_amount
4039 (inner_mode
, INTVAL (XEXP (SUBREG_REG (op0
), 1)) + INTVAL (op1
));
	  /* Combine would usually zero out the value when combining two
	     local shifts and the range becomes larger or equal to the mode.
	     However since we fold away one of the shifts here combine won't
	     see it so we should immediately zero the result if it's out of
	     range.  */
	  if (code == LSHIFTRT
	      && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
4050 tmp
= simplify_gen_binary (code
,
4052 XEXP (SUBREG_REG (op0
), 0),
4055 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0,
					gen_int_shift_amount (mode, val));
	}
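      /* Illustrative aside, not part of the simplifier itself: on targets
	 that define SHIFT_COUNT_TRUNCATED, the hardware uses only the low
	 bits of the shift amount, so a 32-bit shift by 33 is rewritten
	 above as a shift by 33 & 31 == 1.  */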
4070 if (trueop1
== CONST0_RTX (mode
))
4072 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
4076 && CONST_INT_P (trueop1
)
4077 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4078 && IN_RANGE (UINTVAL (trueop1
),
4079 1, GET_MODE_PRECISION (int_mode
) - 1))
4081 auto c
= (wi::one (GET_MODE_PRECISION (int_mode
))
4082 << UINTVAL (trueop1
));
4083 rtx new_op1
= immed_wide_int_const (c
, int_mode
);
4084 return simplify_gen_binary (MULT
, int_mode
, op0
, new_op1
);
4086 goto canonicalize_shift
;
4089 if (trueop1
== CONST0_RTX (mode
))
4091 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
	{
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
	      && zero_val == GET_MODE_PRECISION (inner_mode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, inner_mode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
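      /* Illustrative aside, not part of the simplifier itself: for a
	 32-bit X on a target where CLZ_DEFINED_VALUE_AT_ZERO yields 32,
	 (clz X) is 32 only for X == 0 and at most 31 otherwise, so
	 (lshiftrt (clz X) 5) is 1 exactly when X == 0, i.e. (eq X 0)
	 given STORE_FLAG_VALUE == 1.  */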
4111 if (HWI_COMPUTABLE_MODE_P (mode
)
4112 && mode_signbit_p (mode
, trueop1
)
4113 && ! side_effects_p (op0
))
4115 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4117 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4123 if (HWI_COMPUTABLE_MODE_P (mode
)
4124 && CONST_INT_P (trueop1
)
4125 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
4126 && ! side_effects_p (op0
))
4128 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4130 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4136 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
4138 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4140 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4146 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
4148 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
4150 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
4159 /* Simplify x +/- 0 to x, if possible. */
4160 if (trueop1
== CONST0_RTX (mode
))
4166 /* Simplify x * 0 to 0, if possible. */
4167 if (trueop1
== CONST0_RTX (mode
)
4168 && !side_effects_p (op0
))
4171 /* Simplify x * 1 to x, if possible. */
4172 if (trueop1
== CONST1_RTX (mode
))
4178 /* Simplify x * 0 to 0, if possible. */
4179 if (trueop1
== CONST0_RTX (mode
)
4180 && !side_effects_p (op0
))
4186 /* Simplify x / 1 to x, if possible. */
4187 if (trueop1
== CONST1_RTX (mode
))
4192 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
4193 return gen_vec_duplicate (mode
, op0
);
4194 if (valid_for_const_vector_p (mode
, op0
)
4195 && valid_for_const_vector_p (mode
, op1
))
4196 return gen_const_vec_series (mode
, op0
, op1
);
4200 if (!VECTOR_MODE_P (mode
))
4202 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
4203 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
4204 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
4205 gcc_assert (XVECLEN (trueop1
, 0) == 1);
4207 /* We can't reason about selections made at runtime. */
4208 if (!CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
4211 if (vec_duplicate_p (trueop0
, &elt0
))
4214 if (GET_CODE (trueop0
) == CONST_VECTOR
)
4215 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
4218 /* Extract a scalar element from a nested VEC_SELECT expression
4219 (with optional nested VEC_CONCAT expression). Some targets
4220 (i386) extract scalar element from a vector using chain of
4221 nested VEC_SELECT expressions. When input operand is a memory
4222 operand, this operation can be simplified to a simple scalar
4223 load from an offseted memory address. */
4225 if (GET_CODE (trueop0
) == VEC_SELECT
4226 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
4227 .is_constant (&n_elts
)))
4229 rtx op0
= XEXP (trueop0
, 0);
4230 rtx op1
= XEXP (trueop0
, 1);
4232 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
4238 gcc_assert (GET_CODE (op1
) == PARALLEL
);
4239 gcc_assert (i
< n_elts
);
4241 /* Select element, pointed by nested selector. */
4242 elem
= INTVAL (XVECEXP (op1
, 0, i
));
4244 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
4245 if (GET_CODE (op0
) == VEC_CONCAT
)
4247 rtx op00
= XEXP (op0
, 0);
4248 rtx op01
= XEXP (op0
, 1);
4250 machine_mode mode00
, mode01
;
4251 int n_elts00
, n_elts01
;
4253 mode00
= GET_MODE (op00
);
4254 mode01
= GET_MODE (op01
);
4256 /* Find out the number of elements of each operand.
4257 Since the concatenated result has a constant number
4258 of elements, the operands must too. */
4259 n_elts00
= GET_MODE_NUNITS (mode00
).to_constant ();
4260 n_elts01
= GET_MODE_NUNITS (mode01
).to_constant ();
4262 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
4264 /* Select correct operand of VEC_CONCAT
4265 and adjust selector. */
4266 if (elem
< n_elts01
)
4277 vec
= rtvec_alloc (1);
4278 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
4280 tmp
= gen_rtx_fmt_ee (code
, mode
,
4281 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
4287 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
4288 gcc_assert (GET_MODE_INNER (mode
)
4289 == GET_MODE_INNER (GET_MODE (trueop0
)));
4290 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
4292 if (vec_duplicate_p (trueop0
, &elt0
))
4293 /* It doesn't matter which elements are selected by trueop1,
4294 because they are all the same. */
4295 return gen_vec_duplicate (mode
, elt0
);
4297 if (GET_CODE (trueop0
) == CONST_VECTOR
)
4299 unsigned n_elts
= XVECLEN (trueop1
, 0);
4300 rtvec v
= rtvec_alloc (n_elts
);
4303 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
4304 for (i
= 0; i
< n_elts
; i
++)
4306 rtx x
= XVECEXP (trueop1
, 0, i
);
4308 if (!CONST_INT_P (x
))
4311 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
4315 return gen_rtx_CONST_VECTOR (mode
, v
);
4318 /* Recognize the identity. */
4319 if (GET_MODE (trueop0
) == mode
)
4321 bool maybe_ident
= true;
4322 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
4324 rtx j
= XVECEXP (trueop1
, 0, i
);
4325 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
4327 maybe_ident
= false;
4335 /* If we select a low-part subreg, return that. */
4336 if (vec_series_lowpart_p (mode
, GET_MODE (trueop0
), trueop1
))
4338 rtx new_rtx
= lowpart_subreg (mode
, trueop0
,
4339 GET_MODE (trueop0
));
4340 if (new_rtx
!= NULL_RTX
)
4344 /* If we build {a,b} then permute it, build the result directly. */
4345 if (XVECLEN (trueop1
, 0) == 2
4346 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
4347 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
4348 && GET_CODE (trueop0
) == VEC_CONCAT
4349 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
4350 && GET_MODE (XEXP (trueop0
, 0)) == mode
4351 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
4352 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
4354 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
4355 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
4358 gcc_assert (i0
< 4 && i1
< 4);
4359 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
4360 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
4362 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
4365 if (XVECLEN (trueop1
, 0) == 2
4366 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
4367 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
4368 && GET_CODE (trueop0
) == VEC_CONCAT
4369 && GET_MODE (trueop0
) == mode
)
4371 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
4372 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
4375 gcc_assert (i0
< 2 && i1
< 2);
4376 subop0
= XEXP (trueop0
, i0
);
4377 subop1
= XEXP (trueop0
, i1
);
4379 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
4382 /* If we select one half of a vec_concat, return that. */
4384 if (GET_CODE (trueop0
) == VEC_CONCAT
4385 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
4387 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 1)))
4389 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
4391 rtx subop0
= XEXP (trueop0
, 0);
4392 rtx subop1
= XEXP (trueop0
, 1);
4393 machine_mode mode0
= GET_MODE (subop0
);
4394 machine_mode mode1
= GET_MODE (subop1
);
4395 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
4396 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
4398 bool success
= true;
4399 for (int i
= 1; i
< l0
; ++i
)
4401 rtx j
= XVECEXP (trueop1
, 0, i
);
4402 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
4411 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
4413 bool success
= true;
4414 for (int i
= 1; i
< l1
; ++i
)
4416 rtx j
= XVECEXP (trueop1
, 0, i
);
4417 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
4428 /* Simplify vec_select of a subreg of X to just a vec_select of X
4429 when X has same component mode as vec_select. */
4430 unsigned HOST_WIDE_INT subreg_offset
= 0;
4431 if (GET_CODE (trueop0
) == SUBREG
4432 && GET_MODE_INNER (mode
)
4433 == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0
)))
4434 && GET_MODE_NUNITS (mode
).is_constant (&l1
)
4435 && constant_multiple_p (subreg_memory_offset (trueop0
),
4436 GET_MODE_UNIT_BITSIZE (mode
),
4440 = GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0
)));
4441 bool success
= true;
4442 for (int i
= 0; i
!= l1
; i
++)
4444 rtx idx
= XVECEXP (trueop1
, 0, i
);
4445 if (!CONST_INT_P (idx
)
4446 || maybe_ge (UINTVAL (idx
) + subreg_offset
, nunits
))
4458 rtvec vec
= rtvec_alloc (l1
);
4459 for (int i
= 0; i
< l1
; i
++)
4461 = GEN_INT (INTVAL (XVECEXP (trueop1
, 0, i
))
4463 par
= gen_rtx_PARALLEL (VOIDmode
, vec
);
4465 return gen_rtx_VEC_SELECT (mode
, SUBREG_REG (trueop0
), par
);
4470 if (XVECLEN (trueop1
, 0) == 1
4471 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
4472 && GET_CODE (trueop0
) == VEC_CONCAT
)
4475 offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
4477 /* Try to find the element in the VEC_CONCAT. */
4478 while (GET_MODE (vec
) != mode
4479 && GET_CODE (vec
) == VEC_CONCAT
)
4481 poly_int64 vec_size
;
4483 if (CONST_INT_P (XEXP (vec
, 0)))
4485 /* vec_concat of two const_ints doesn't make sense with
4486 respect to modes. */
4487 if (CONST_INT_P (XEXP (vec
, 1)))
4490 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
4491 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
4494 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
4496 if (known_lt (offset
, vec_size
))
4497 vec
= XEXP (vec
, 0);
4498 else if (known_ge (offset
, vec_size
))
4501 vec
= XEXP (vec
, 1);
4505 vec
= avoid_constant_pool_reference (vec
);
4508 if (GET_MODE (vec
) == mode
)
4512 /* If we select elements in a vec_merge that all come from the same
4513 operand, select from that operand directly. */
4514 if (GET_CODE (op0
) == VEC_MERGE
)
4516 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
4517 if (CONST_INT_P (trueop02
))
4519 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
4520 bool all_operand0
= true;
4521 bool all_operand1
= true;
4522 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
4524 rtx j
= XVECEXP (trueop1
, 0, i
);
4525 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
4526 all_operand1
= false;
4528 all_operand0
= false;
4530 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
4531 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
4532 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
4533 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
4537 /* If we have two nested selects that are inverses of each
4538 other, replace them with the source operand. */
4539 if (GET_CODE (trueop0
) == VEC_SELECT
4540 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
4542 rtx op0_subop1
= XEXP (trueop0
, 1);
4543 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
4544 gcc_assert (known_eq (XVECLEN (trueop1
, 0), GET_MODE_NUNITS (mode
)));
4546 /* Apply the outer ordering vector to the inner one. (The inner
4547 ordering vector is expressly permitted to be of a different
4548 length than the outer one.) If the result is { 0, 1, ..., n-1 }
4549 then the two VEC_SELECTs cancel. */
4550 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
4552 rtx x
= XVECEXP (trueop1
, 0, i
);
4553 if (!CONST_INT_P (x
))
4555 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
4556 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
4559 return XEXP (trueop0
, 0);
4565 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
4566 ? GET_MODE (trueop0
)
4567 : GET_MODE_INNER (mode
));
4568 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
4569 ? GET_MODE (trueop1
)
4570 : GET_MODE_INNER (mode
));
4572 gcc_assert (VECTOR_MODE_P (mode
));
4573 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode
)
4574 + GET_MODE_SIZE (op1_mode
),
4575 GET_MODE_SIZE (mode
)));
4577 if (VECTOR_MODE_P (op0_mode
))
4578 gcc_assert (GET_MODE_INNER (mode
)
4579 == GET_MODE_INNER (op0_mode
));
4581 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
4583 if (VECTOR_MODE_P (op1_mode
))
4584 gcc_assert (GET_MODE_INNER (mode
)
4585 == GET_MODE_INNER (op1_mode
));
4587 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
4589 unsigned int n_elts
, in_n_elts
;
4590 if ((GET_CODE (trueop0
) == CONST_VECTOR
4591 || CONST_SCALAR_INT_P (trueop0
)
4592 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
4593 && (GET_CODE (trueop1
) == CONST_VECTOR
4594 || CONST_SCALAR_INT_P (trueop1
)
4595 || CONST_DOUBLE_AS_FLOAT_P (trueop1
))
4596 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
)
4597 && GET_MODE_NUNITS (op0_mode
).is_constant (&in_n_elts
))
4599 rtvec v
= rtvec_alloc (n_elts
);
4601 for (i
= 0; i
< n_elts
; i
++)
4605 if (!VECTOR_MODE_P (op0_mode
))
4606 RTVEC_ELT (v
, i
) = trueop0
;
4608 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
4612 if (!VECTOR_MODE_P (op1_mode
))
4613 RTVEC_ELT (v
, i
) = trueop1
;
4615 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
4620 return gen_rtx_CONST_VECTOR (mode
, v
);
4623 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4624 Restrict the transformation to avoid generating a VEC_SELECT with a
4625 mode unrelated to its operand. */
4626 if (GET_CODE (trueop0
) == VEC_SELECT
4627 && GET_CODE (trueop1
) == VEC_SELECT
4628 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
4629 && GET_MODE_INNER (GET_MODE (XEXP (trueop0
, 0)))
4630 == GET_MODE_INNER(mode
))
4632 rtx par0
= XEXP (trueop0
, 1);
4633 rtx par1
= XEXP (trueop1
, 1);
4634 int len0
= XVECLEN (par0
, 0);
4635 int len1
= XVECLEN (par1
, 0);
4636 rtvec vec
= rtvec_alloc (len0
+ len1
);
4637 for (int i
= 0; i
< len0
; i
++)
4638 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
4639 for (int i
= 0; i
< len1
; i
++)
4640 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
4641 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
4642 gen_rtx_PARALLEL (VOIDmode
, vec
));
4651 if (mode
== GET_MODE (op0
)
4652 && mode
== GET_MODE (op1
)
4653 && vec_duplicate_p (op0
, &elt0
)
4654 && vec_duplicate_p (op1
, &elt1
))
4656 /* Try applying the operator to ELT and see if that simplifies.
4657 We can duplicate the result if so.
4659 The reason we don't use simplify_gen_binary is that it isn't
4660 necessarily a win to convert things like:
4662 (plus:V (vec_duplicate:V (reg:S R1))
4663 (vec_duplicate:V (reg:S R2)))
4667 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4669 The first might be done entirely in vector registers while the
4670 second might need a move between register files. */
4671 tem
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4674 return gen_vec_duplicate (mode
, tem
);
4680 /* Return true if binary operation OP distributes over addition in operand
4681 OPNO, with the other operand being held constant. OPNO counts from 1. */
4684 distributes_over_addition_p (rtx_code op
, int opno
)
4702 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
4705 if (VECTOR_MODE_P (mode
)
4706 && code
!= VEC_CONCAT
4707 && GET_CODE (op0
) == CONST_VECTOR
4708 && GET_CODE (op1
) == CONST_VECTOR
)
4711 if (CONST_VECTOR_STEPPED_P (op0
)
4712 && CONST_VECTOR_STEPPED_P (op1
))
4713 /* We can operate directly on the encoding if:
4715 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4717 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4719 Addition and subtraction are the supported operators
4720 for which this is true. */
4721 step_ok_p
= (code
== PLUS
|| code
== MINUS
);
4722 else if (CONST_VECTOR_STEPPED_P (op0
))
4723 /* We can operate directly on stepped encodings if:
4727 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4729 which is true if (x -> x op c) distributes over addition. */
4730 step_ok_p
= distributes_over_addition_p (code
, 1);
4732 /* Similarly in reverse. */
4733 step_ok_p
= distributes_over_addition_p (code
, 2);
4734 rtx_vector_builder builder
;
4735 if (!builder
.new_binary_operation (mode
, op0
, op1
, step_ok_p
))
4738 unsigned int count
= builder
.encoded_nelts ();
4739 for (unsigned int i
= 0; i
< count
; i
++)
4741 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4742 CONST_VECTOR_ELT (op0
, i
),
4743 CONST_VECTOR_ELT (op1
, i
));
4744 if (!x
|| !valid_for_const_vector_p (mode
, x
))
4746 builder
.quick_push (x
);
4748 return builder
.build ();
4751 if (VECTOR_MODE_P (mode
)
4752 && code
== VEC_CONCAT
4753 && (CONST_SCALAR_INT_P (op0
)
4754 || CONST_FIXED_P (op0
)
4755 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4756 && (CONST_SCALAR_INT_P (op1
)
4757 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4758 || CONST_FIXED_P (op1
)))
4760 /* Both inputs have a constant number of elements, so the result
4762 unsigned n_elts
= GET_MODE_NUNITS (mode
).to_constant ();
4763 rtvec v
= rtvec_alloc (n_elts
);
4765 gcc_assert (n_elts
>= 2);
4768 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4769 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4771 RTVEC_ELT (v
, 0) = op0
;
4772 RTVEC_ELT (v
, 1) = op1
;
4776 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
)).to_constant ();
4777 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
)).to_constant ();
4780 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4781 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4782 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4784 for (i
= 0; i
< op0_n_elts
; ++i
)
4785 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op0
, i
);
4786 for (i
= 0; i
< op1_n_elts
; ++i
)
4787 RTVEC_ELT (v
, op0_n_elts
+i
) = CONST_VECTOR_ELT (op1
, i
);
4790 return gen_rtx_CONST_VECTOR (mode
, v
);
4793 if (SCALAR_FLOAT_MODE_P (mode
)
4794 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4795 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4796 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4807 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4809 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4811 for (i
= 0; i
< 4; i
++)
4828 real_from_target (&r
, tmp0
, mode
);
4829 return const_double_from_real_value (r
, mode
);
4833 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4834 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4837 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4838 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4840 if (HONOR_SNANS (mode
)
4841 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4842 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4845 real_convert (&f0
, mode
, opr0
);
4846 real_convert (&f1
, mode
, opr1
);
4849 && real_equal (&f1
, &dconst0
)
4850 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4853 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4854 && flag_trapping_math
4855 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4857 int s0
= REAL_VALUE_NEGATIVE (f0
);
4858 int s1
= REAL_VALUE_NEGATIVE (f1
);
4863 /* Inf + -Inf = NaN plus exception. */
4868 /* Inf - Inf = NaN plus exception. */
4873 /* Inf / Inf = NaN plus exception. */
4880 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4881 && flag_trapping_math
4882 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4883 || (REAL_VALUE_ISINF (f1
)
4884 && real_equal (&f0
, &dconst0
))))
4885 /* Inf * 0 = NaN plus exception. */
4888 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4890 real_convert (&result
, mode
, &value
);
4892 /* Don't constant fold this floating point operation if
4893 the result has overflowed and flag_trapping_math. */
4895 if (flag_trapping_math
4896 && MODE_HAS_INFINITIES (mode
)
4897 && REAL_VALUE_ISINF (result
)
4898 && !REAL_VALUE_ISINF (f0
)
4899 && !REAL_VALUE_ISINF (f1
))
4900 /* Overflow plus exception. */
      /* Don't constant fold this floating point operation if the
	 result may depend upon the run-time rounding mode and
	 flag_rounding_math is set, or if GCC's software emulation
	 is unable to accurately represent the result.  */
4908 if ((flag_rounding_math
4909 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4910 && (inexact
|| !real_identical (&result
, &value
)))
4913 return const_double_from_real_value (result
, mode
);
4917 /* We can fold some multi-word operations. */
4918 scalar_int_mode int_mode
;
4919 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
4920 && CONST_SCALAR_INT_P (op0
)
4921 && CONST_SCALAR_INT_P (op1
)
4922 && GET_MODE_PRECISION (int_mode
) <= MAX_BITSIZE_MODE_ANY_INT
)
4925 wi::overflow_type overflow
;
4926 rtx_mode_t pop0
= rtx_mode_t (op0
, int_mode
);
4927 rtx_mode_t pop1
= rtx_mode_t (op1
, int_mode
);
4929 #if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert triggers,
	 you just need to make the port support wide int.  */
      gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4941 result
= wi::sub (pop0
, pop1
);
4945 result
= wi::add (pop0
, pop1
);
4949 result
= wi::mul (pop0
, pop1
);
4953 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4959 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4965 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4971 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4977 result
= wi::bit_and (pop0
, pop1
);
4981 result
= wi::bit_or (pop0
, pop1
);
4985 result
= wi::bit_xor (pop0
, pop1
);
4989 result
= wi::smin (pop0
, pop1
);
4993 result
= wi::smax (pop0
, pop1
);
4997 result
= wi::umin (pop0
, pop1
);
5001 result
= wi::umax (pop0
, pop1
);
5008 wide_int wop1
= pop1
;
5009 if (SHIFT_COUNT_TRUNCATED
)
5010 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
5011 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
5017 result
= wi::lrshift (pop0
, wop1
);
5021 result
= wi::arshift (pop0
, wop1
);
5025 result
= wi::lshift (pop0
, wop1
);
5036 if (wi::neg_p (pop1
))
5042 result
= wi::lrotate (pop0
, pop1
);
5046 result
= wi::rrotate (pop0
, pop1
);
5056 result
= wi::add (pop0
, pop1
, SIGNED
, &overflow
);
5057 clamp_signed_saturation
:
5058 if (overflow
== wi::OVF_OVERFLOW
)
5059 result
= wi::max_value (GET_MODE_PRECISION (int_mode
), SIGNED
);
5060 else if (overflow
== wi::OVF_UNDERFLOW
)
5061 result
= wi::min_value (GET_MODE_PRECISION (int_mode
), SIGNED
);
5062 else if (overflow
!= wi::OVF_NONE
)
5067 result
= wi::add (pop0
, pop1
, UNSIGNED
, &overflow
);
5068 clamp_unsigned_saturation
:
5069 if (overflow
!= wi::OVF_NONE
)
5070 result
= wi::max_value (GET_MODE_PRECISION (int_mode
), UNSIGNED
);
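	  /* Illustrative aside, not part of the simplifier itself: the
	     clamping above implements saturating arithmetic.  In an 8-bit
	     signed mode, 100 + 100 saturates to 127 (the mode's maximum)
	     instead of wrapping to -56; in an 8-bit unsigned mode,
	     200 + 100 saturates to 255.  */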
5074 result
= wi::sub (pop0
, pop1
, SIGNED
, &overflow
);
5075 goto clamp_signed_saturation
;
5078 result
= wi::sub (pop0
, pop1
, UNSIGNED
, &overflow
);
5079 if (overflow
!= wi::OVF_NONE
)
5080 result
= wi::min_value (GET_MODE_PRECISION (int_mode
), UNSIGNED
);
5084 result
= wi::mul (pop0
, pop1
, SIGNED
, &overflow
);
5085 goto clamp_signed_saturation
;
5088 result
= wi::mul (pop0
, pop1
, UNSIGNED
, &overflow
);
5089 goto clamp_unsigned_saturation
;
5092 result
= wi::mul_high (pop0
, pop1
, SIGNED
);
5096 result
= wi::mul_high (pop0
, pop1
, UNSIGNED
);
5102 return immed_wide_int_const (result
, int_mode
);
5105 /* Handle polynomial integers. */
5106 if (NUM_POLY_INT_COEFFS
> 1
5107 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5108 && poly_int_rtx_p (op0
)
5109 && poly_int_rtx_p (op1
))
5111 poly_wide_int result
;
5115 result
= wi::to_poly_wide (op0
, mode
) + wi::to_poly_wide (op1
, mode
);
5119 result
= wi::to_poly_wide (op0
, mode
) - wi::to_poly_wide (op1
, mode
);
5123 if (CONST_SCALAR_INT_P (op1
))
5124 result
= wi::to_poly_wide (op0
, mode
) * rtx_mode_t (op1
, mode
);
5130 if (CONST_SCALAR_INT_P (op1
))
5132 wide_int shift
= rtx_mode_t (op1
, mode
);
5133 if (SHIFT_COUNT_TRUNCATED
)
5134 shift
= wi::umod_trunc (shift
, GET_MODE_PRECISION (int_mode
));
5135 else if (wi::geu_p (shift
, GET_MODE_PRECISION (int_mode
)))
5137 result
= wi::to_poly_wide (op0
, mode
) << shift
;
5144 if (!CONST_SCALAR_INT_P (op1
)
5145 || !can_ior_p (wi::to_poly_wide (op0
, mode
),
5146 rtx_mode_t (op1
, mode
), &result
))
5153 return immed_wide_int_const (result
, int_mode
);
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */
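/* Illustrative aside, not part of simplify_plus_minus itself: the routine
   flattens a tree of PLUS/MINUS/NEG into a list of signed operands,
   cancels and folds pairs, then rebuilds the expression, so for example

     (plus (minus (reg B) (reg A)) (plus (reg A) (const_int 4)))

   collapses to (plus (reg B) (const_int 4)) because the two copies of
   (reg A) cancel.  */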
5191 simplify_context::simplify_plus_minus (rtx_code code
, machine_mode mode
,
5194 struct simplify_plus_minus_op_data
5201 int changed
, n_constants
, canonicalized
= 0;
5204 memset (ops
, 0, sizeof ops
);
5206 /* Set up the two operands and then expand them until nothing has been
5207 changed. If we run out of room in our array, give up; this should
5208 almost never happen. */
5213 ops
[1].neg
= (code
== MINUS
);
5220 for (i
= 0; i
< n_ops
; i
++)
5222 rtx this_op
= ops
[i
].op
;
5223 int this_neg
= ops
[i
].neg
;
5224 enum rtx_code this_code
= GET_CODE (this_op
);
5230 if (n_ops
== ARRAY_SIZE (ops
))
5233 ops
[n_ops
].op
= XEXP (this_op
, 1);
5234 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
5237 ops
[i
].op
= XEXP (this_op
, 0);
5239 /* If this operand was negated then we will potentially
5240 canonicalize the expression. Similarly if we don't
5241 place the operands adjacent we're re-ordering the
5242 expression and thus might be performing a
5243 canonicalization. Ignore register re-ordering.
5244 ??? It might be better to shuffle the ops array here,
5245 but then (plus (plus (A, B), plus (C, D))) wouldn't
5246 be seen as non-canonical. */
5249 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
5254 ops
[i
].op
= XEXP (this_op
, 0);
5255 ops
[i
].neg
= ! this_neg
;
5261 if (n_ops
!= ARRAY_SIZE (ops
)
5262 && GET_CODE (XEXP (this_op
, 0)) == PLUS
5263 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
5264 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
5266 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
5267 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
5268 ops
[n_ops
].neg
= this_neg
;
5276 /* ~a -> (-a - 1) */
5277 if (n_ops
!= ARRAY_SIZE (ops
))
5279 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
5280 ops
[n_ops
++].neg
= this_neg
;
5281 ops
[i
].op
= XEXP (this_op
, 0);
5282 ops
[i
].neg
= !this_neg
;
5288 CASE_CONST_SCALAR_INT
:
5289 case CONST_POLY_INT
:
5293 ops
[i
].op
= neg_poly_int_rtx (mode
, this_op
);
5307 if (n_constants
> 1)
5310 gcc_assert (n_ops
>= 2);
5312 /* If we only have two operands, we can avoid the loops. */
5315 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
5318 /* Get the two operands. Be careful with the order, especially for
5319 the cases where code == MINUS. */
5320 if (ops
[0].neg
&& ops
[1].neg
)
5322 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
5325 else if (ops
[0].neg
)
5336 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
5339 /* Now simplify each pair of operands until nothing changes. */
5342 /* Insertion sort is good enough for a small array. */
5343 for (i
= 1; i
< n_ops
; i
++)
5345 struct simplify_plus_minus_op_data save
;
5349 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
5352 /* Just swapping registers doesn't count as canonicalization. */
5358 ops
[j
+ 1] = ops
[j
];
5360 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
5365 for (i
= n_ops
- 1; i
> 0; i
--)
5366 for (j
= i
- 1; j
>= 0; j
--)
5368 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
5369 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
5371 if (lhs
!= 0 && rhs
!= 0)
5373 enum rtx_code ncode
= PLUS
;
5379 std::swap (lhs
, rhs
);
5381 else if (swap_commutative_operands_p (lhs
, rhs
))
5382 std::swap (lhs
, rhs
);
5384 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
5385 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
5387 rtx tem_lhs
, tem_rhs
;
5389 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
5390 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
5391 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
5394 if (tem
&& !CONSTANT_P (tem
))
5395 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
5398 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
5402 /* Reject "simplifications" that just wrap the two
5403 arguments in a CONST. Failure to do so can result
5404 in infinite recursion with simplify_binary_operation
5405 when it calls us to simplify CONST operations.
5406 Also, if we find such a simplification, don't try
5407 any more combinations with this rhs: We must have
5408 something like symbol+offset, ie. one of the
5409 trivial CONST expressions we handle later. */
5410 if (GET_CODE (tem
) == CONST
5411 && GET_CODE (XEXP (tem
, 0)) == ncode
5412 && XEXP (XEXP (tem
, 0), 0) == lhs
5413 && XEXP (XEXP (tem
, 0), 1) == rhs
)
5416 if (GET_CODE (tem
) == NEG
)
5417 tem
= XEXP (tem
, 0), lneg
= !lneg
;
5418 if (poly_int_rtx_p (tem
) && lneg
)
5419 tem
= neg_poly_int_rtx (mode
, tem
), lneg
= 0;
5423 ops
[j
].op
= NULL_RTX
;
5433 /* Pack all the operands to the lower-numbered entries. */
5434 for (i
= 0, j
= 0; j
< n_ops
; j
++)
5443 /* If nothing changed, check that rematerialization of rtl instructions
5444 is still required. */
5447 /* Perform rematerialization if only all operands are registers and
5448 all operations are PLUS. */
5449 /* ??? Also disallow (non-global, non-frame) fixed registers to work
5450 around rs6000 and how it uses the CA register. See PR67145. */
5451 for (i
= 0; i
< n_ops
; i
++)
5453 || !REG_P (ops
[i
].op
)
5454 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
5455 && fixed_regs
[REGNO (ops
[i
].op
)]
5456 && !global_regs
[REGNO (ops
[i
].op
)]
5457 && ops
[i
].op
!= frame_pointer_rtx
5458 && ops
[i
].op
!= arg_pointer_rtx
5459 && ops
[i
].op
!= stack_pointer_rtx
))
5464 /* Create (minus -C X) instead of (neg (const (plus X C))). */
5466 && CONST_INT_P (ops
[1].op
)
5467 && CONSTANT_P (ops
[0].op
)
5469 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
5471 /* We suppressed creation of trivial CONST expressions in the
5472 combination loop to avoid recursion. Create one manually now.
5473 The combination loop should have ensured that there is exactly
5474 one CONST_INT, and the sort will have ensured that it is last
5475 in the array and that any other constant will be next-to-last. */
5478 && poly_int_rtx_p (ops
[n_ops
- 1].op
)
5479 && CONSTANT_P (ops
[n_ops
- 2].op
))
5481 rtx value
= ops
[n_ops
- 1].op
;
5482 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
5483 value
= neg_poly_int_rtx (mode
, value
);
5484 if (CONST_INT_P (value
))
5486 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
5492 /* Put a non-negated operand first, if possible. */
5494 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
5497 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
5506 /* Now make the result by performing the requested operations. */
5509 for (i
= 1; i
< n_ops
; i
++)
5510 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
5511 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
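/* Illustrative aside, not part of the predicate itself: operands such as
   (plus (reg X) (const_int 8)), (minus (reg X) (reg Y)) or
   (const (plus (symbol_ref "var") (const_int 4))) satisfy
   plus_minus_operand_p, while a bare REG or CONST_INT does not, so the
   caller only invokes the brute-force simplify_plus_minus walk when at
   least one operand has such a shape.  */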
/* Like simplify_binary_operation, except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands must
   not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
5537 simplify_context::simplify_relational_operation (rtx_code code
,
5539 machine_mode cmp_mode
,
5542 rtx tem
, trueop0
, trueop1
;
5544 if (cmp_mode
== VOIDmode
)
5545 cmp_mode
= GET_MODE (op0
);
5546 if (cmp_mode
== VOIDmode
)
5547 cmp_mode
= GET_MODE (op1
);
5549 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
5551 return relational_result (mode
, cmp_mode
, tem
);
5553 /* For the following tests, ensure const0_rtx is op1. */
5554 if (swap_commutative_operands_p (op0
, op1
)
5555 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
5556 std::swap (op0
, op1
), code
= swap_condition (code
);
5558 /* If op0 is a compare, extract the comparison arguments from it. */
5559 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5560 return simplify_gen_relational (code
, mode
, VOIDmode
,
5561 XEXP (op0
, 0), XEXP (op0
, 1));
5563 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
)
5566 trueop0
= avoid_constant_pool_reference (op0
);
5567 trueop1
= avoid_constant_pool_reference (op1
);
5568 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
5572 /* This part of simplify_relational_operation is only used when CMP_MODE
5573 is not in class MODE_CC (i.e. it is a real comparison).
5575 MODE is the mode of the result, while CMP_MODE specifies in which
5576 mode the comparison is done in, so it is the mode of the operands. */
5579 simplify_context::simplify_relational_operation_1 (rtx_code code
,
5581 machine_mode cmp_mode
,
5584 enum rtx_code op0code
= GET_CODE (op0
);
5586 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
5588 /* If op0 is a comparison, extract the comparison arguments
5592 if (GET_MODE (op0
) == mode
)
5593 return simplify_rtx (op0
);
5595 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
5596 XEXP (op0
, 0), XEXP (op0
, 1));
5598 else if (code
== EQ
)
5600 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
5601 if (new_code
!= UNKNOWN
)
5602 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
5603 XEXP (op0
, 0), XEXP (op0
, 1));
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }
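  /* Illustrative aside, not part of the simplifier itself: the rule above
     is the usual unsigned overflow test.  For 32-bit unsigned a,
     a + 3 < 3 holds exactly when the addition wrapped, i.e. when
     a >= 0xfffffffd == (unsigned) -3, so (ltu (plus a 3) 3) becomes
     (geu a (neg 3)).  */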
5623 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5624 transformed into (LTU a -C). */
5625 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
5626 && CONST_INT_P (XEXP (op0
, 1))
5627 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
5628 && XEXP (op0
, 1) != const0_rtx
)
5631 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
5632 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
5633 XEXP (op0
, 0), new_cmp
);
5636 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5637 if ((code
== LTU
|| code
== GEU
)
5638 && GET_CODE (op0
) == PLUS
5639 && rtx_equal_p (op1
, XEXP (op0
, 1))
5640 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5641 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
5642 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
5643 copy_rtx (XEXP (op0
, 0)));
5645 if (op1
== const0_rtx
)
5647 /* Canonicalize (GTU x 0) as (NE x 0). */
5649 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
5650 /* Canonicalize (LEU x 0) as (EQ x 0). */
5652 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
5654 else if (op1
== const1_rtx
)
5659 /* Canonicalize (GE x 1) as (GT x 0). */
5660 return simplify_gen_relational (GT
, mode
, cmp_mode
,
5663 /* Canonicalize (GEU x 1) as (NE x 0). */
5664 return simplify_gen_relational (NE
, mode
, cmp_mode
,
5667 /* Canonicalize (LT x 1) as (LE x 0). */
5668 return simplify_gen_relational (LE
, mode
, cmp_mode
,
5671 /* Canonicalize (LTU x 1) as (EQ x 0). */
5672 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
5678 else if (op1
== constm1_rtx
)
5680 /* Canonicalize (LE x -1) as (LT x 0). */
5682 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
5683 /* Canonicalize (GT x -1) as (GE x 0). */
5685 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
5688 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5689 if ((code
== EQ
|| code
== NE
)
5690 && (op0code
== PLUS
|| op0code
== MINUS
)
5692 && CONSTANT_P (XEXP (op0
, 1))
5693 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
5695 rtx x
= XEXP (op0
, 0);
5696 rtx c
= XEXP (op0
, 1);
5697 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
5698 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
5700 /* Detect an infinite recursive condition, where we oscillate at this
5701 simplification case between:
5702 A + B == C <---> C - B == A,
5703 where A, B, and C are all constants with non-simplifiable expressions,
5704 usually SYMBOL_REFs. */
5705 if (GET_CODE (tem
) == invcode
5707 && rtx_equal_p (c
, XEXP (tem
, 1)))
5710 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
5713 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
5714 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5715 scalar_int_mode int_mode
, int_cmp_mode
;
5717 && op1
== const0_rtx
5718 && is_int_mode (mode
, &int_mode
)
5719 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
5720 /* ??? Work-around BImode bugs in the ia64 backend. */
5721 && int_mode
!= BImode
5722 && int_cmp_mode
!= BImode
5723 && nonzero_bits (op0
, int_cmp_mode
) == 1
5724 && STORE_FLAG_VALUE
== 1)
5725 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
5726 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
5727 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
5729 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5730 if ((code
== EQ
|| code
== NE
)
5731 && op1
== const0_rtx
5733 return simplify_gen_relational (code
, mode
, cmp_mode
,
5734 XEXP (op0
, 0), XEXP (op0
, 1));
5736 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5737 if ((code
== EQ
|| code
== NE
)
5739 && rtx_equal_p (XEXP (op0
, 0), op1
)
5740 && !side_effects_p (XEXP (op0
, 0)))
5741 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
5744 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5745 if ((code
== EQ
|| code
== NE
)
5747 && rtx_equal_p (XEXP (op0
, 1), op1
)
5748 && !side_effects_p (XEXP (op0
, 1)))
5749 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5752 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5753 if ((code
== EQ
|| code
== NE
)
5755 && CONST_SCALAR_INT_P (op1
)
5756 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
5757 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5758 simplify_gen_binary (XOR
, cmp_mode
,
5759 XEXP (op0
, 1), op1
));
5761 /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5762 constant folding if x/y is a constant. */
5763 if ((code
== EQ
|| code
== NE
)
5764 && (op0code
== AND
|| op0code
== IOR
)
5765 && !side_effects_p (op1
)
5766 && op1
!= CONST0_RTX (cmp_mode
))
5768 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5769 (eq/ne (and (not y) x) 0). */
5770 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 0), op1
))
5771 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 1), op1
)))
5773 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1),
5775 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
5777 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5778 CONST0_RTX (cmp_mode
));
5781 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5782 (eq/ne (and (not x) y) 0). */
5783 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 1), op1
))
5784 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 0), op1
)))
5786 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0),
5788 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
5790 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5791 CONST0_RTX (cmp_mode
));
5795 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5796 if ((code
== EQ
|| code
== NE
)
5797 && GET_CODE (op0
) == BSWAP
5798 && CONST_SCALAR_INT_P (op1
))
5799 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5800 simplify_gen_unary (BSWAP
, cmp_mode
,
5803 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5804 if ((code
== EQ
|| code
== NE
)
5805 && GET_CODE (op0
) == BSWAP
5806 && GET_CODE (op1
) == BSWAP
)
5807 return simplify_gen_relational (code
, mode
, cmp_mode
,
5808 XEXP (op0
, 0), XEXP (op1
, 0));
5810 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
5816 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5817 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
5818 XEXP (op0
, 0), const0_rtx
);
5823 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5824 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
5825 XEXP (op0
, 0), const0_rtx
);
5844 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5845 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
5846 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5847 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5848 For floating-point comparisons, assume that the operands were ordered. */
5851 comparison_result (enum rtx_code code
, int known_results
)
5857 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
5860 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
5864 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
5867 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
5871 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
5874 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
5877 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
5879 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
5882 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
5884 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
5887 return const_true_rtx
;
5895 /* Check if the given comparison (done in the given MODE) is actually
5896 a tautology or a contradiction. If the mode is VOIDmode, the
5897 comparison is done in "infinite precision". If no simplification
5898 is possible, this function returns zero. Otherwise, it returns
5899 either const_true_rtx or const0_rtx. */
5902 simplify_const_relational_operation (enum rtx_code code
,
5910 gcc_assert (mode
!= VOIDmode
5911 || (GET_MODE (op0
) == VOIDmode
5912 && GET_MODE (op1
) == VOIDmode
));
5914 /* If op0 is a compare, extract the comparison arguments from it. */
5915 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5917 op1
= XEXP (op0
, 1);
5918 op0
= XEXP (op0
, 0);
5920 if (GET_MODE (op0
) != VOIDmode
)
5921 mode
= GET_MODE (op0
);
5922 else if (GET_MODE (op1
) != VOIDmode
)
5923 mode
= GET_MODE (op1
);
5928 /* We can't simplify MODE_CC values since we don't know what the
5929 actual comparison is. */
5930 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
)
5933 /* Make sure the constant is second. */
5934 if (swap_commutative_operands_p (op0
, op1
))
5936 std::swap (op0
, op1
);
5937 code
= swap_condition (code
);
5940 trueop0
= avoid_constant_pool_reference (op0
);
5941 trueop1
= avoid_constant_pool_reference (op1
);
5943 /* For integer comparisons of A and B maybe we can simplify A - B and can
5944 then simplify a comparison of that with zero. If A and B are both either
5945 a register or a CONST_INT, this can't help; testing for these cases will
5946 prevent infinite recursion here and speed things up.
5948 We can only do this for EQ and NE comparisons as otherwise we may
5949 lose or introduce overflow which we cannot disregard as undefined as
5950 we do not know the signedness of the operation on either the left or
5951 the right hand side of the comparison. */
5953 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5954 && (code
== EQ
|| code
== NE
)
5955 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5956 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5957 && (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
)) != 0
5958 /* We cannot do this if tem is a nonzero address. */
5959 && ! nonzero_address_p (tem
))
5960 return simplify_const_relational_operation (signed_condition (code
),
5961 mode
, tem
, const0_rtx
);
5963 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5964 return const_true_rtx
;
5966 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
5969 /* For modes without NaNs, if the two operands are equal, we know the
5970 result except if they have side-effects. Even with NaNs we know
5971 the result of unordered comparisons and, if signaling NaNs are
5972 irrelevant, also the result of LT/GT/LTGT. */
5973 if ((! HONOR_NANS (trueop0
)
5974 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5975 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5976 && ! HONOR_SNANS (trueop0
)))
5977 && rtx_equal_p (trueop0
, trueop1
)
5978 && ! side_effects_p (trueop0
))
5979 return comparison_result (code
, CMP_EQ
);
5981 /* If the operands are floating-point constants, see if we can fold
5983 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5984 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5985 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5987 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5988 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5990 /* Comparisons are unordered iff at least one of the values is NaN. */
5991 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
6001 return const_true_rtx
;
6014 return comparison_result (code
,
6015 (real_equal (d0
, d1
) ? CMP_EQ
:
6016 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
6019 /* Otherwise, see if the operands are both integers. */
6020 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
6021 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
6027 rtx_mode_t ptrueop0
= rtx_mode_t (trueop0
, cmode
);
6028 rtx_mode_t ptrueop1
= rtx_mode_t (trueop1
, cmode
);
6030 if (wi::eq_p (ptrueop0
, ptrueop1
))
6031 return comparison_result (code
, CMP_EQ
);
6034 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
6035 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
6036 return comparison_result (code
, cr
);
6040 /* Optimize comparisons with upper and lower bounds. */
6041 scalar_int_mode int_mode
;
6042 if (CONST_INT_P (trueop1
)
6043 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
6044 && HWI_COMPUTABLE_MODE_P (int_mode
)
6045 && !side_effects_p (trueop0
))
6048 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
6049 HOST_WIDE_INT val
= INTVAL (trueop1
);
6050 HOST_WIDE_INT mmin
, mmax
;
6060 /* Get a reduced range if the sign bit is zero. */
6061 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
6068 rtx mmin_rtx
, mmax_rtx
;
6069 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
6071 mmin
= INTVAL (mmin_rtx
);
6072 mmax
= INTVAL (mmax_rtx
);
6075 unsigned int sign_copies
6076 = num_sign_bit_copies (trueop0
, int_mode
);
6078 mmin
>>= (sign_copies
- 1);
6079 mmax
>>= (sign_copies
- 1);
6085 /* x >= y is always true for y <= mmin, always false for y > mmax. */
6087 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
6088 return const_true_rtx
;
6089 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
6094 return const_true_rtx
;
6099 /* x <= y is always true for y >= mmax, always false for y < mmin. */
6101 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
6102 return const_true_rtx
;
6103 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
6108 return const_true_rtx
;
6114 /* x == y is always false for y out of range. */
6115 if (val
< mmin
|| val
> mmax
)
6119 /* x > y is always false for y >= mmax, always true for y < mmin. */
6121 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
6123 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
6124 return const_true_rtx
;
6130 return const_true_rtx
;
6133 /* x < y is always false for y <= mmin, always true for y > mmax. */
6135 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
6137 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
6138 return const_true_rtx
;
6144 return const_true_rtx
;
6148 /* x != y is always true for y out of range. */
6149 if (val
< mmin
|| val
> mmax
)
6150 return const_true_rtx
;
6158 /* Optimize integer comparisons with zero. */
6159 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
6160 && trueop1
== const0_rtx
6161 && !side_effects_p (trueop0
))
6163 /* Some addresses are known to be nonzero. We don't know
6164 their sign, but equality comparisons are known. */
6165 if (nonzero_address_p (trueop0
))
6167 if (code
== EQ
|| code
== LEU
)
6169 if (code
== NE
|| code
== GTU
)
6170 return const_true_rtx
;
6173 /* See if the first operand is an IOR with a constant. If so, we
6174 may be able to determine the result of this comparison. */
6175 if (GET_CODE (op0
) == IOR
)
6177 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
6178 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
6180 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
6181 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
6182 && (UINTVAL (inner_const
)
6193 return const_true_rtx
;
6197 return const_true_rtx
;
6211 /* Optimize comparison of ABS with zero. */
6212 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
6213 && (GET_CODE (trueop0
) == ABS
6214 || (GET_CODE (trueop0
) == FLOAT_EXTEND
6215 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
6220 /* Optimize abs(x) < 0.0. */
6221 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
6226 /* Optimize abs(x) >= 0.0. */
6227 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
6228 return const_true_rtx
;
6232 /* Optimize ! (abs(x) < 0.0). */
6233 return const_true_rtx
;
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the
   expression can be simplified to that, or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */

rtx
simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
                                         rtx true_val, rtx false_val)
{
  if (cmp_code != EQ && cmp_code != NE)
    return NULL_RTX;

  /* Result on X == 0 and X != 0 respectively.  */
  rtx on_zero, on_nonzero;
  if (cmp_code == EQ)
    {
      on_zero = true_val;
      on_nonzero = false_val;
    }
  else
    {
      on_zero = false_val;
      on_nonzero = true_val;
    }

  rtx_code op_code = GET_CODE (on_nonzero);
  if ((op_code != CLZ && op_code != CTZ)
      || !rtx_equal_p (XEXP (on_nonzero, 0), x)
      || !CONST_INT_P (on_zero))
    return NULL_RTX;

  HOST_WIDE_INT op_val;
  scalar_int_mode mode ATTRIBUTE_UNUSED
    = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
  if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
       || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
      && op_val == INTVAL (on_zero))
    return on_nonzero;

  return NULL_RTX;
}
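/* For example, with CMP_CODE == EQ, TRUE_VAL == (const_int 32) and
   FALSE_VAL == (clz:SI x), the conditional reduces to (clz:SI x) on a
   target whose CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode.  */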
/* Try to simplify X given that it appears within operand OP of a
   VEC_MERGE operation whose mask is MASK.  X need not use the same
   vector mode as the VEC_MERGE, but it must have the same number of
   elements.

   Return the simplified X on success, otherwise return NULL_RTX.  */

rtx
simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
{
  gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
  poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
  if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
    {
      if (side_effects_p (XEXP (x, 1 - op)))
        return NULL_RTX;

      return XEXP (x, op);
    }
  if (UNARY_P (x)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      if (top0)
        return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
                                   GET_MODE (XEXP (x, 0)));
    }
  if (BINARY_P (x)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
      if (top0 || top1)
        {
          if (COMPARISON_P (x))
            return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
                                            GET_MODE (XEXP (x, 0)) != VOIDmode
                                            ? GET_MODE (XEXP (x, 0))
                                            : GET_MODE (XEXP (x, 1)),
                                            top0 ? top0 : XEXP (x, 0),
                                            top1 ? top1 : XEXP (x, 1));
          else
            return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
                                        top0 ? top0 : XEXP (x, 0),
                                        top1 ? top1 : XEXP (x, 1));
        }
    }
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
      rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
      if (top0 || top1 || top2)
        return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
                                     GET_MODE (XEXP (x, 0)),
                                     top0 ? top0 : XEXP (x, 0),
                                     top1 ? top1 : XEXP (x, 1),
                                     top2 ? top2 : XEXP (x, 2));
    }
  return NULL_RTX;
}
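/* For example, if X is (plus:V4SI (vec_merge:V4SI a b m) c) and MASK is m,
   then for OP == 0 the result is (plus:V4SI a c): only the lanes that m
   selects from operand 0 of the outer VEC_MERGE are observable, so the
   inner VEC_MERGE can be replaced by its first arm.  */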
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications are possible.  */

rtx
simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
                                              machine_mode op0_mode,
                                              rtx op0, rtx op1, rtx op2)
{
  bool any_change = false;
  rtx tem, trueop2;
  scalar_int_mode int_mode, int_op0_mode;
  unsigned int n_elts;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        std::swap (op0, op1), any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      break;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
          && HWI_COMPUTABLE_MODE_P (int_mode))
        {
          /* Extracting a bit-field from a constant.  */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (!BITS_BIG_ENDIAN)
            val >>= op2val;
          else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
            val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
          else
            /* Not enough information to calculate the bit position.  */
            break;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= (HOST_WIDE_INT_1U << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & (HOST_WIDE_INT_1U << (op1val - 1))) != 0)
                val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
            }

          return gen_int_mode (val, int_mode);
        }
      break;
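      /* Worked example for the extraction above: with BITS_BIG_ENDIAN
         clear, op0 == 0xabcd, op1 == 8 (width) and op2 == 4 (position),
         VAL is shifted right by 4 and masked with 0xff, giving 0xbc;
         SIGN_EXTRACT would additionally sign-extend from bit 7.  */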
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      /* Convert (!c) != {0,...,0} ? a : b into
         c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
          && GET_CODE (op0) == NE
          && GET_CODE (XEXP (op0, 0)) == NOT
          && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
        {
          rtx cv = XEXP (op0, 1);
          int nunits;
          bool ok = true;
          if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
            ok = false;
          else
            for (int i = 0; i < nunits; ++i)
              if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
                {
                  ok = false;
                  break;
                }
          if (ok)
            {
              rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
                                        XEXP (XEXP (op0, 0), 0),
                                        XEXP (op0, 1));
              rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
              return retval;
            }
        }

      /* Convert x == 0 ? N : clz (x) into clz (x) when
         CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
         Similarly for ctz (x).  */
      if (COMPARISON_P (op0) && !side_effects_p (op0)
          && XEXP (op0, 1) == const0_rtx)
        {
          rtx simplified
            = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
                                     op1, op2);
          if (simplified)
            return simplified;
        }

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                   ? GET_MODE (XEXP (op0, 1))
                                   : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2)
          && GET_MODE_NUNITS (mode).is_constant (&n_elts))
        {
          unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
          unsigned HOST_WIDE_INT mask;
          if (n_elts == HOST_BITS_PER_WIDE_INT)
            mask = -1;
          else
            mask = (HOST_WIDE_INT_1U << n_elts) - 1;

          if (!(sel & mask) && !side_effects_p (op0))
            return op1;
          if ((sel & mask) == mask && !side_effects_p (op1))
            return op0;

          rtx trueop0 = avoid_constant_pool_reference (op0);
          rtx trueop1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (trueop0) == CONST_VECTOR
              && GET_CODE (trueop1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
                                    ? CONST_VECTOR_ELT (trueop0, i)
                                    : CONST_VECTOR_ELT (trueop1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
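          /* In the constant fold just above, bit I of SEL selects element I
             from op0 and a clear bit selects it from op1; e.g. for V4SI and
             SEL == 5 (binary 0101), elements 0 and 2 come from op0 and
             elements 1 and 3 from op1.  */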
          /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
             if no element from a appears in the result.  */
          if (GET_CODE (op0) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op0, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
                  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 1), op1, op2);
                  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 0), op1, op2);
                }
            }
          if (GET_CODE (op1) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op1, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
                  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 1), op2);
                  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 0), op2);
                }
            }
          /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
             with a.  */
          if (GET_CODE (op0) == VEC_DUPLICATE
              && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
              && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
              && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
            {
              tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
              if (CONST_INT_P (tem) && CONST_INT_P (op2))
                {
                  if (XEXP (XEXP (op0, 0), 0) == op1
                      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
                    return op1;
                }
            }
          /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
                                (const_int N))
             with (vec_concat (X) (B)) if N == 1 or
             (vec_concat (A) (X)) if N == 2.  */
          if (GET_CODE (op0) == VEC_DUPLICATE
              && GET_CODE (op1) == CONST_VECTOR
              && known_eq (CONST_VECTOR_NUNITS (op1), 2)
              && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
              && IN_RANGE (sel, 1, 2))
            {
              rtx newop0 = XEXP (op0, 0);
              rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
              if (sel == 2)
                std::swap (newop0, newop1);
              return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
            }
          /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
             with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
             Only applies for vectors of two elements.  */
          if (GET_CODE (op0) == VEC_DUPLICATE
              && GET_CODE (op1) == VEC_CONCAT
              && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
              && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
              && IN_RANGE (sel, 1, 2))
            {
              rtx newop0 = XEXP (op0, 0);
              rtx newop1 = XEXP (op1, 2 - sel);
              rtx otherop = XEXP (op1, sel - 1);
              if (sel == 2)
                std::swap (newop0, newop1);
              /* Don't want to throw away the other part of the vec_concat if
                 it has side-effects.  */
              if (!side_effects_p (otherop))
                return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
            }

          /* Replace:

              (vec_merge:outer (vec_duplicate:outer x:inner)
                               (subreg:outer y:inner 0)
                               (const_int N))

             with (vec_concat:outer x:inner y:inner) if N == 1,
             or (vec_concat:outer y:inner x:inner) if N == 2.

             Implicitly, this means we have a paradoxical subreg, but such
             a check is cheap, so make it anyway.

             Only applies for vectors of two elements.  */
          if (GET_CODE (op0) == VEC_DUPLICATE
              && GET_CODE (op1) == SUBREG
              && GET_MODE (op1) == GET_MODE (op0)
              && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
              && paradoxical_subreg_p (op1)
              && subreg_lowpart_p (op1)
              && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
              && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
              && IN_RANGE (sel, 1, 2))
            {
              rtx newop0 = XEXP (op0, 0);
              rtx newop1 = SUBREG_REG (op1);
              if (sel == 2)
                std::swap (newop0, newop1);
              return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
            }

          /* Same as above but with switched operands:
             Replace (vec_merge:outer (subreg:outer x:inner 0)
                                      (vec_duplicate:outer y:inner)
                                      (const_int N))

             with (vec_concat:outer x:inner y:inner) if N == 1,
             or (vec_concat:outer y:inner x:inner) if N == 2.  */
          if (GET_CODE (op1) == VEC_DUPLICATE
              && GET_CODE (op0) == SUBREG
              && GET_MODE (op0) == GET_MODE (op1)
              && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
              && paradoxical_subreg_p (op0)
              && subreg_lowpart_p (op0)
              && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
              && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
              && IN_RANGE (sel, 1, 2))
            {
              rtx newop0 = SUBREG_REG (op0);
              rtx newop1 = XEXP (op1, 0);
              if (sel == 2)
                std::swap (newop0, newop1);
              return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
            }

          /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
                                (const_int n))
             with (vec_concat x y) or (vec_concat y x) depending on value
             of N.  */
          if (GET_CODE (op0) == VEC_DUPLICATE
              && GET_CODE (op1) == VEC_DUPLICATE
              && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
              && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
              && IN_RANGE (sel, 1, 2))
            {
              rtx newop0 = XEXP (op0, 0);
              rtx newop1 = XEXP (op1, 0);
              if (sel == 2)
                std::swap (newop0, newop1);

              return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
            }
        }

      if (rtx_equal_p (op0, op1)
          && !side_effects_p (op2) && !side_effects_p (op1))
        return op0;

      if (!side_effects_p (op2))
        {
          rtx top0
            = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
          rtx top1
            = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
          if (top0 || top1)
            return simplify_gen_ternary (code, mode, mode,
                                         top0 ? top0 : op0,
                                         top1 ? top1 : op1, op2);
        }

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Try to calculate NUM_BYTES bytes of the target memory image of X,
   starting at byte FIRST_BYTE.  Return true on success and add the
   bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
   that the bytes follow target memory order.  Leave BYTES unmodified
   on failure.

   MODE is the mode of X.  The caller must reserve NUM_BYTES bytes in
   BYTES before calling this function.  */

bool
native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
                   unsigned int first_byte, unsigned int num_bytes)
{
  /* Check the mode is sensible.  */
  gcc_assert (GET_MODE (x) == VOIDmode
              ? is_a <scalar_int_mode> (mode)
              : mode == GET_MODE (x));

  if (GET_CODE (x) == CONST_VECTOR)
    {
      /* CONST_VECTOR_ELT follows target memory order, so no shuffling
         is necessary.  The only complication is that MODE_VECTOR_BOOL
         vectors can have several elements per byte.  */
      unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
                                                   GET_MODE_NUNITS (mode));
      unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
      if (elt_bits < BITS_PER_UNIT)
        {
          /* This is the only case in which elements can be smaller than
             a byte.  */
          gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
          for (unsigned int i = 0; i < num_bytes; ++i)
            {
              target_unit value = 0;
              for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
                {
                  value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & 1) << j;
                  elt += 1;
                }
              bytes.quick_push (value);
            }
          return true;
        }

      unsigned int start = bytes.length ();
      unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
      /* Make FIRST_BYTE relative to ELT.  */
      first_byte %= elt_bytes;
      while (num_bytes > 0)
        {
          /* Work out how many bytes we want from element ELT.  */
          unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
          if (!native_encode_rtx (GET_MODE_INNER (mode),
                                  CONST_VECTOR_ELT (x, elt), bytes,
                                  first_byte, chunk_bytes))
            {
              bytes.truncate (start);
              return false;
            }
          elt += 1;
          first_byte = 0;
          num_bytes -= chunk_bytes;
        }
      return true;
    }

  /* All subsequent cases are limited to scalars.  */
  scalar_mode smode;
  if (!is_a <scalar_mode> (mode, &smode))
    return false;

  /* Make sure that the region is in range.  */
  unsigned int end_byte = first_byte + num_bytes;
  unsigned int mode_bytes = GET_MODE_SIZE (smode);
  gcc_assert (end_byte <= mode_bytes);

  if (CONST_SCALAR_INT_P (x))
    {
      /* The target memory layout is affected by both BYTES_BIG_ENDIAN
         and WORDS_BIG_ENDIAN.  Use the subreg machinery to get the lsb
         position of each byte.  */
      rtx_mode_t value (x, smode);
      wide_int_ref value_wi (value);
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
        {
          /* Always constant because the inputs are.  */
          unsigned int lsb
            = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
          /* Operate directly on the encoding rather than using
             wi::extract_uhwi, so that we preserve the sign or zero
             extension for modes that are not a whole number of bits in
             size.  (Zero extension is only used for the combination of
             innermode == BImode && STORE_FLAG_VALUE == 1).  */
          unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
          unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
          unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
          bytes.quick_push (uhwi >> shift);
        }
      return true;
    }

  if (CONST_DOUBLE_P (x))
    {
      /* real_to_target produces an array of integers in target memory order.
         All integers before the last one have 32 bits; the last one may
         have 32 bits or fewer, depending on whether the mode bitsize
         is divisible by 32.  Each of these integers is then laid out
         in target memory as any other integer would be.  */
      long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
      real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);

      /* The (maximum) number of target bytes per element of el32.  */
      unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
      gcc_assert (bytes_per_el32 != 0);

      /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
         handling above.  */
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
        {
          unsigned int index = byte / bytes_per_el32;
          unsigned int subbyte = byte % bytes_per_el32;
          unsigned int int_bytes = MIN (bytes_per_el32,
                                        mode_bytes - index * bytes_per_el32);
          /* Always constant because the inputs are.  */
          unsigned int lsb
            = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
          bytes.quick_push ((unsigned long) el32[index] >> lsb);
        }
      return true;
    }

  if (GET_CODE (x) == CONST_FIXED)
    {
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
        {
          /* Always constant because the inputs are.  */
          unsigned int lsb
            = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
          unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
          if (lsb >= HOST_BITS_PER_WIDE_INT)
            {
              lsb -= HOST_BITS_PER_WIDE_INT;
              piece = CONST_FIXED_VALUE_HIGH (x);
            }
          bytes.quick_push (piece >> lsb);
        }
      return true;
    }

  return false;
}
/* Read a vector of mode MODE from the target memory image given by BYTES,
   starting at byte FIRST_BYTE.  The vector is known to be encodable using
   NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
   and BYTES is known to have enough bytes to supply NPATTERNS *
   NELTS_PER_PATTERN vector elements.  Each element of BYTES contains
   BITS_PER_UNIT bits and the bytes are in target memory order.

   Return the vector on success, otherwise return NULL_RTX.  */

rtx
native_decode_vector_rtx (machine_mode mode, const vec<target_unit> &bytes,
                          unsigned int first_byte, unsigned int npatterns,
                          unsigned int nelts_per_pattern)
{
  rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);

  unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
                                               GET_MODE_NUNITS (mode));
  if (elt_bits < BITS_PER_UNIT)
    {
      /* This is the only case in which elements can be smaller than a byte.
         Element 0 is always in the lsb of the containing byte.  */
      gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
      for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
        {
          unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
          unsigned int byte_index = bit_index / BITS_PER_UNIT;
          unsigned int lsb = bit_index % BITS_PER_UNIT;
          builder.quick_push (bytes[byte_index] & (1 << lsb)
                              ? CONST1_RTX (BImode)
                              : CONST0_RTX (BImode));
        }
    }
  else
    {
      for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
        {
          rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
          if (!x)
            return NULL_RTX;
          builder.quick_push (x);
          first_byte += elt_bits / BITS_PER_UNIT;
        }
    }
  return builder.build ();
}
/* Read an rtx of mode MODE from the target memory image given by BYTES,
   starting at byte FIRST_BYTE.  Each element of BYTES contains BITS_PER_UNIT
   bits and the bytes are in target memory order.  The image has enough
   values to specify all bytes of MODE.

   Return the rtx on success, otherwise return NULL_RTX.  */

rtx
native_decode_rtx (machine_mode mode, const vec<target_unit> &bytes,
                   unsigned int first_byte)
{
  if (VECTOR_MODE_P (mode))
    {
      /* If we know at compile time how many elements there are,
         pull each element directly from BYTES.  */
      unsigned int nelts;
      if (GET_MODE_NUNITS (mode).is_constant (&nelts))
        return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
      return NULL_RTX;
    }

  scalar_int_mode imode;
  if (is_a <scalar_int_mode> (mode, &imode)
      && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
    {
      /* Pull the bytes msb first, so that we can use simple
         shift-and-insert wide_int operations.  */
      unsigned int size = GET_MODE_SIZE (imode);
      wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
      for (unsigned int i = 0; i < size; ++i)
        {
          unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
          /* Always constant because the inputs are.  */
          unsigned int subbyte
            = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
          result <<= BITS_PER_UNIT;
          result |= bytes[first_byte + subbyte];
        }
      return immed_wide_int_const (result, imode);
    }

  scalar_float_mode fmode;
  if (is_a <scalar_float_mode> (mode, &fmode))
    {
      /* We need to build an array of integers in target memory order.
         All integers before the last one have 32 bits; the last one may
         have 32 bits or fewer, depending on whether the mode bitsize
         is divisible by 32.  */
      long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
      unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
      memset (el32, 0, num_el32 * sizeof (long));

      /* The (maximum) number of target bytes per element of el32.  */
      unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
      gcc_assert (bytes_per_el32 != 0);

      unsigned int mode_bytes = GET_MODE_SIZE (fmode);
      for (unsigned int byte = 0; byte < mode_bytes; ++byte)
        {
          unsigned int index = byte / bytes_per_el32;
          unsigned int subbyte = byte % bytes_per_el32;
          unsigned int int_bytes = MIN (bytes_per_el32,
                                        mode_bytes - index * bytes_per_el32);
          /* Always constant because the inputs are.  */
          unsigned int lsb
            = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
          el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
        }
      REAL_VALUE_TYPE r;
      real_from_target (&r, el32, fmode);
      return const_double_from_real_value (r, fmode);
    }

  if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
    {
      scalar_mode smode = as_a <scalar_mode> (mode);
      FIXED_VALUE_TYPE f;
      f.mode = smode;
      f.data.low = 0;
      f.data.high = 0;

      unsigned int mode_bytes = GET_MODE_SIZE (smode);
      for (unsigned int byte = 0; byte < mode_bytes; ++byte)
        {
          /* Always constant because the inputs are.  */
          unsigned int lsb
            = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
          unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
          if (lsb >= HOST_BITS_PER_WIDE_INT)
            f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
          else
            f.data.low |= unit << lsb;
        }
      return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
    }

  return NULL_RTX;
}
/* Simplify a byte offset BYTE into CONST_VECTOR X.  The main purpose
   is to convert a runtime BYTE value into a constant one.  */

static poly_uint64
simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
{
  /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
  machine_mode mode = GET_MODE (x);
  unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
                                               GET_MODE_NUNITS (mode));
  /* The number of bits needed to encode one element from each pattern.  */
  unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;

  /* Identify the start point in terms of a sequence number and a byte offset
     within that sequence.  */
  poly_uint64 first_sequence;
  unsigned HOST_WIDE_INT subbit;
  if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
                       &first_sequence, &subbit))
    {
      unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
      if (nelts_per_pattern == 1)
        /* This is a duplicated vector, so the value of FIRST_SEQUENCE
           doesn't matter.  */
        byte = subbit / BITS_PER_UNIT;
      else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
        {
          /* The subreg drops the first element from each pattern and
             only uses the second element.  Find the first sequence
             that starts on a byte boundary.  */
          subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
          byte = subbit / BITS_PER_UNIT;
        }
    }
  return byte;
}
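/* For a duplicated vector (a single pattern with one element per pattern),
   any BYTE offset therefore collapses to an offset within one element;
   e.g. a runtime offset of 3 * GET_MODE_UNIT_SIZE (mode) becomes 0.  */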
/* Subroutine of simplify_subreg in which:

   - X is known to be a CONST_VECTOR
   - OUTERMODE is known to be a vector mode

   Try to handle the subreg by operating on the CONST_VECTOR encoding
   rather than on each individual element of the CONST_VECTOR.

   Return the simplified subreg on success, otherwise return NULL_RTX.  */

static rtx
simplify_const_vector_subreg (machine_mode outermode, rtx x,
                              machine_mode innermode, unsigned int first_byte)
{
  /* Paradoxical subregs of vectors have dubious semantics.  */
  if (paradoxical_subreg_p (outermode, innermode))
    return NULL_RTX;

  /* We can only preserve the semantics of a stepped pattern if the new
     vector element is the same as the original one.  */
  if (CONST_VECTOR_STEPPED_P (x)
      && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
    return NULL_RTX;

  /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
  unsigned int x_elt_bits
    = vector_element_size (GET_MODE_BITSIZE (innermode),
                           GET_MODE_NUNITS (innermode));
  unsigned int out_elt_bits
    = vector_element_size (GET_MODE_BITSIZE (outermode),
                           GET_MODE_NUNITS (outermode));

  /* The number of bits needed to encode one element from every pattern
     of the original vector.  */
  unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;

  /* The number of bits needed to encode one element from every pattern
     of the result.  */
  unsigned int out_sequence_bits
    = least_common_multiple (x_sequence_bits, out_elt_bits);

  /* Work out the number of interleaved patterns in the output vector
     and the number of encoded elements per pattern.  */
  unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
  unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);

  /* The encoding scheme requires the number of elements to be a multiple
     of the number of patterns, so that each pattern appears at least once
     and so that the same number of elements appear from each pattern.  */
  bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
  unsigned int const_nunits;
  if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
      && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
    {
      /* Either the encoding is invalid, or applying it would give us
         more elements than we need.  Just encode each element directly.  */
      out_npatterns = const_nunits;
      nelts_per_pattern = 1;
    }
  else if (!ok_p)
    return NULL_RTX;

  /* Get enough bytes of X to form the new encoding.  */
  unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
  unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
  auto_vec<target_unit, 128> buffer (buffer_bytes);
  if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
    return NULL_RTX;

  /* Reencode the bytes as OUTERMODE.  */
  return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
                                   nelts_per_pattern);
}
/* Try to simplify a subreg of a constant by encoding the subreg region
   as a sequence of target bytes and reading them back in the new mode.
   Return the new value on success, otherwise return null.

   The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
   and byte offset FIRST_BYTE.  */

static rtx
simplify_immed_subreg (fixed_size_mode outermode, rtx x,
                       machine_mode innermode, unsigned int first_byte)
{
  unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
  auto_vec<target_unit, 128> buffer (buffer_bytes);

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
    return x;

  /* Paradoxical subregs read undefined values for bytes outside of the
     inner value.  However, we have traditionally always sign-extended
     integer constants and zero-extended others.  */
  unsigned int inner_bytes = buffer_bytes;
  if (paradoxical_subreg_p (outermode, innermode))
    {
      if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
        return NULL_RTX;

      target_unit filler = 0;
      if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
        filler = -1;

      /* Add any leading bytes due to big-endian layout.  The number of
         bytes must be constant because both modes have constant size.  */
      unsigned int leading_bytes
        = -byte_lowpart_offset (outermode, innermode).to_constant ();
      for (unsigned int i = 0; i < leading_bytes; ++i)
        buffer.quick_push (filler);

      if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
        return NULL_RTX;

      /* Add any trailing bytes due to little-endian layout.  */
      while (buffer.length () < buffer_bytes)
        buffer.quick_push (filler);
    }
  else if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
    return NULL_RTX;
  rtx ret = native_decode_rtx (outermode, buffer, 0);
  if (ret && MODE_COMPOSITE_P (outermode))
    {
      auto_vec<target_unit, 128> buffer2 (buffer_bytes);
      if (!native_encode_rtx (outermode, ret, buffer2, 0, buffer_bytes))
        return NULL_RTX;
      for (unsigned int i = 0; i < buffer_bytes; ++i)
        if (buffer[i] != buffer2[i])
          return NULL_RTX;
    }
  return ret;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_context::simplify_subreg (machine_mode outermode, rtx op,
                                   machine_mode innermode, poly_uint64 byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  poly_uint64 outersize = GET_MODE_SIZE (outermode);
  if (!multiple_p (byte, outersize))
    return NULL_RTX;

  poly_uint64 innersize = GET_MODE_SIZE (innermode);
  if (maybe_ge (byte, innersize))
    return NULL_RTX;

  if (outermode == innermode && known_eq (byte, 0U))
    return op;

  if (GET_CODE (op) == CONST_VECTOR)
    byte = simplify_const_vector_byte_offset (op, byte);

  if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
    {
      rtx elt;

      if (VECTOR_MODE_P (outermode)
          && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
          && vec_duplicate_p (op, &elt))
        return gen_vec_duplicate (outermode, elt);

      if (outermode == GET_MODE_INNER (innermode)
          && vec_duplicate_p (op, &elt))
        return elt;
    }

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || CONST_FIXED_P (op)
      || GET_CODE (op) == CONST_VECTOR)
    {
      unsigned HOST_WIDE_INT cbyte;
      if (byte.is_constant (&cbyte))
        {
          if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
            {
              rtx tmp = simplify_const_vector_subreg (outermode, op,
                                                      innermode, cbyte);
              if (tmp)
                return tmp;
            }

          fixed_size_mode fs_outermode;
          if (is_a <fixed_size_mode> (outermode, &fs_outermode))
            return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
        }
    }
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
      rtx newx;

      if (outermode == innermostmode
          && known_eq (byte, 0U)
          && known_eq (SUBREG_BYTE (op), 0))
        return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
         to the inner value of OP.  */
      poly_int64 mem_offset = subreg_memory_offset (outermode,
                                                    innermode, byte);
      poly_int64 op_mem_offset = subreg_memory_offset (op);
      poly_int64 final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
        {
          /* Bail out in case resulting subreg would be incorrect.  */
          if (maybe_lt (final_offset, 0)
              || maybe_ge (poly_uint64 (final_offset), innermostsize)
              || !multiple_p (final_offset, outersize))
            return NULL_RTX;
        }
      else
        {
          poly_int64 required_offset = subreg_memory_offset (outermode,
                                                             innermostmode, 0);
          if (maybe_ne (final_offset, required_offset))
            return NULL_RTX;
          /* Paradoxical subregs always have byte offset 0.  */
          final_offset = 0;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_SIGN (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && known_ge (outersize, innersize)
              && known_le (outersize, innermostsize)
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
                                      subreg_memory_offset (outermode,
                                                            innermode, byte));

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that cannot
             grok partial register anyway.  */

          if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && !(STRICT_ALIGNMENT && MEM_ALIGN (op) < GET_MODE_ALIGNMENT (outermode))
      && known_le (outersize, innersize))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      poly_uint64 final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
        part_mode = GET_MODE_INNER (GET_MODE (op));
      poly_uint64 part_size = GET_MODE_SIZE (part_mode);
      if (known_lt (byte, part_size))
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else if (known_ge (byte, part_size))
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }
      else
        return NULL_RTX;

      if (maybe_gt (final_offset + outersize, part_size))
        return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
        part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* Simplify
        (subreg (vec_merge (X)
                           (vector)
                           (const_int ((1 << N) | M)))
                (N * sizeof (outermode)))
     to
        (subreg (X) (N * sizeof (outermode)))
   */
  unsigned int idx;
  if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
      && idx < HOST_BITS_PER_WIDE_INT
      && GET_CODE (op) == VEC_MERGE
      && GET_MODE_INNER (innermode) == outermode
      && CONST_INT_P (XEXP (op, 2))
      && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
    return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
        return CONST0_RTX (outermode);
    }

  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
    {
      /* Handle polynomial integers.  The upper bits of a paradoxical
         subreg are undefined, so this is safe regardless of whether
         we're truncating or extending.  */
      if (CONST_POLY_INT_P (op))
        {
          poly_wide_int val
            = poly_wide_int::from (const_poly_int_value (op),
                                   GET_MODE_PRECISION (int_outermode),
                                   SIGNED);
          return immed_wide_int_const (val, int_outermode);
        }

      if (GET_MODE_PRECISION (int_outermode)
          < GET_MODE_PRECISION (int_innermode))
        {
          rtx tem = simplify_truncation (int_outermode, op, int_innermode);
          if (tem)
            return tem;
        }
    }

  /* If OP is a vector comparison and the subreg is not changing the
     number of elements or the size of the elements, change the result
     of the comparison to the new mode.  */
  if (COMPARISON_P (op)
      && VECTOR_MODE_P (outermode)
      && VECTOR_MODE_P (innermode)
      && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
      && known_eq (GET_MODE_UNIT_SIZE (outermode),
                   GET_MODE_UNIT_SIZE (innermode)))
    return simplify_gen_relational (GET_CODE (op), outermode, innermode,
                                    XEXP (op, 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
                                       machine_mode innermode,
                                       poly_uint64 byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (MODE_COMPOSITE_P (outermode)
      && (CONST_SCALAR_INT_P (op)
          || CONST_DOUBLE_AS_FLOAT_P (op)
          || CONST_FIXED_P (op)
          || GET_CODE (op) == CONST_VECTOR))
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
                                  machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
                              subreg_lowpart_offset (outer_mode, inner_mode));
}
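/* Typical use: lowpart_subreg (QImode, x, SImode) yields (subreg:QI x 0)
   on a little-endian target and (subreg:QI x 3) on a big-endian one, or
   a simpler rtx when simplify_subreg can fold the subreg away.  */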
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}
namespace selftest {

/* Make a unique pseudo REG of mode MODE for use by selftests.  */

static rtx
make_test_reg (machine_mode mode)
{
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}

static void
test_scalar_int_ops (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx six = GEN_INT (6);

  rtx neg_op0 = simplify_gen_unary (NEG, mode, op0, mode);
  rtx not_op0 = simplify_gen_unary (NOT, mode, op0, mode);
  rtx bswap_op0 = simplify_gen_unary (BSWAP, mode, op0, mode);

  rtx and_op0_op1 = simplify_gen_binary (AND, mode, op0, op1);
  rtx ior_op0_op1 = simplify_gen_binary (IOR, mode, op0, op1);
  rtx xor_op0_op1 = simplify_gen_binary (XOR, mode, op0, op1);

  rtx and_op0_6 = simplify_gen_binary (AND, mode, op0, six);
  rtx and_op1_6 = simplify_gen_binary (AND, mode, op1, six);

  /* Test some binary identities.  */
  ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, const0_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (MINUS, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, op0, const1_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, const1_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (DIV, mode, op0, const1_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, constm1_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, constm1_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, const0_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, const0_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFT, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATE, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFTRT, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (LSHIFTRT, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATERT, mode, op0, const0_rtx));

  /* Test some self-inverse operations.  */
  ASSERT_RTX_EQ (op0, simplify_gen_unary (NEG, mode, neg_op0, mode));
  ASSERT_RTX_EQ (op0, simplify_gen_unary (NOT, mode, not_op0, mode));
  ASSERT_RTX_EQ (op0, simplify_gen_unary (BSWAP, mode, bswap_op0, mode));

  /* Test some reflexive operations.  */
  ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (SMIN, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (SMAX, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (UMIN, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (UMAX, mode, op0, op0));

  ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (MINUS, mode, op0, op0));
  ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (XOR, mode, op0, op0));

  /* Test simplify_distributive_operation.  */
  ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, xor_op0_op1, six),
                 simplify_gen_binary (XOR, mode, and_op0_6, and_op1_6));
  ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, ior_op0_op1, six),
                 simplify_gen_binary (IOR, mode, and_op0_6, and_op1_6));
  ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, and_op0_op1, six),
                 simplify_gen_binary (AND, mode, and_op0_6, and_op1_6));

  /* Test useless extensions are eliminated.  */
  ASSERT_RTX_EQ (op0, simplify_gen_unary (TRUNCATE, mode, op0, mode));
  ASSERT_RTX_EQ (op0, simplify_gen_unary (ZERO_EXTEND, mode, op0, mode));
  ASSERT_RTX_EQ (op0, simplify_gen_unary (SIGN_EXTEND, mode, op0, mode));
  ASSERT_RTX_EQ (op0, lowpart_subreg (mode, op0, mode));
}
/* Verify some simplifications of integer extension/truncation.
   Machine mode BMODE is guaranteed to be wider than SMODE.  */

static void
test_scalar_int_ext_ops (machine_mode bmode, machine_mode smode)
{
  rtx sreg = make_test_reg (smode);

  /* Check truncation of extension.  */
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
                                     simplify_gen_unary (ZERO_EXTEND, bmode,
                                                         sreg, smode),
                                     bmode),
                 sreg);
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
                                     simplify_gen_unary (SIGN_EXTEND, bmode,
                                                         sreg, smode),
                                     bmode),
                 sreg);
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
                                     lowpart_subreg (bmode, sreg, smode),
                                     bmode),
                 sreg);
}

/* Verify more simplifications of integer extension/truncation.
   BMODE is wider than MMODE which is wider than SMODE.  */

static void
test_scalar_int_ext_ops2 (machine_mode bmode, machine_mode mmode,
                          machine_mode smode)
{
  rtx breg = make_test_reg (bmode);
  rtx mreg = make_test_reg (mmode);
  rtx sreg = make_test_reg (smode);

  /* Check truncate of truncate.  */
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
                                     simplify_gen_unary (TRUNCATE, mmode,
                                                         breg, bmode),
                                     mmode),
                 simplify_gen_unary (TRUNCATE, smode, breg, bmode));

  /* Check extension of extension.  */
  ASSERT_RTX_EQ (simplify_gen_unary (ZERO_EXTEND, bmode,
                                     simplify_gen_unary (ZERO_EXTEND, mmode,
                                                         sreg, smode),
                                     mmode),
                 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
  ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
                                     simplify_gen_unary (SIGN_EXTEND, mmode,
                                                         sreg, smode),
                                     mmode),
                 simplify_gen_unary (SIGN_EXTEND, bmode, sreg, smode));
  ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
                                     simplify_gen_unary (ZERO_EXTEND, mmode,
                                                         sreg, smode),
                                     mmode),
                 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));

  /* Check truncation of extension.  */
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
                                     simplify_gen_unary (ZERO_EXTEND, bmode,
                                                         mreg, mmode),
                                     bmode),
                 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
                                     simplify_gen_unary (SIGN_EXTEND, bmode,
                                                         mreg, mmode),
                                     bmode),
                 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
  ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
                                     lowpart_subreg (bmode, mreg, mmode),
                                     bmode),
                 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
}
/* Verify some simplifications involving scalar expressions.  */

static void
test_scalar_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (SCALAR_INT_MODE_P (mode) && mode != BImode)
        test_scalar_int_ops (mode);
    }

  test_scalar_int_ext_ops (HImode, QImode);
  test_scalar_int_ext_ops (SImode, QImode);
  test_scalar_int_ext_ops (SImode, HImode);
  test_scalar_int_ext_ops (DImode, QImode);
  test_scalar_int_ext_ops (DImode, HImode);
  test_scalar_int_ext_ops (DImode, SImode);

  test_scalar_int_ext_ops2 (SImode, HImode, QImode);
  test_scalar_int_ext_ops2 (DImode, HImode, QImode);
  test_scalar_int_ext_ops2 (DImode, SImode, QImode);
  test_scalar_int_ext_ops2 (DImode, SImode, HImode);
}
/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  poly_uint64 nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
                     simplify_unary_operation (NOT, mode,
                                               duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
                     simplify_unary_operation (NEG, mode,
                                               duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
                     simplify_binary_operation (PLUS, mode, duplicate,
                                                CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
                     simplify_binary_operation (MINUS, mode, duplicate,
                                                CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
                         simplify_binary_operation (MINUS, mode, duplicate,
                                                    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
                     simplify_binary_operation (VEC_SELECT, inner_mode,
                                                duplicate, zero_par));

  unsigned HOST_WIDE_INT const_nunits;
  if (nunits.is_constant (&const_nunits))
    {
      /* And again with the final element.  */
      rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
      rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
      ASSERT_RTX_PTR_EQ (scalar_reg,
                         simplify_binary_operation (VEC_SELECT, inner_mode,
                                                    duplicate, last_par));

      /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE.  */
      rtx vector_reg = make_test_reg (mode);
      for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
        {
          if (i >= HOST_BITS_PER_WIDE_INT)
            break;
          rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
          rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
          poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
          ASSERT_RTX_EQ (scalar_reg,
                         simplify_gen_subreg (inner_mode, vm,
                                              mode, offset));
        }
    }

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
                 simplify_gen_subreg (inner_mode, duplicate,
                                      mode, offset));

  machine_mode narrower_mode;
  if (maybe_ne (nunits, 2U)
      && multiple_p (nunits, 2)
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_DUPLICATE of a vector.  */
      rtx_vector_builder nbuilder (narrower_mode, 2, 1);
      nbuilder.quick_push (const0_rtx);
      nbuilder.quick_push (const1_rtx);
      rtx_vector_builder builder (mode, 2, 1);
      builder.quick_push (const0_rtx);
      builder.quick_push (const1_rtx);
      ASSERT_RTX_EQ (builder.build (),
                     simplify_unary_operation (VEC_DUPLICATE, mode,
                                               nbuilder.build (),
                                               narrower_mode));

      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
        = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
        = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
                     simplify_binary_operation (VEC_SELECT, narrower_mode,
                                                duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
                     simplify_gen_subreg (narrower_mode, duplicate,
                                          mode, offset));
    }
}
7951 /* Test vector simplifications involving VEC_SERIES in which the
7952 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7953 register that holds one element of MODE. */
7956 test_vector_ops_series (machine_mode mode
, rtx scalar_reg
)
7958 /* Test unary cases with VEC_SERIES arguments. */
7959 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
7960 rtx duplicate
= gen_rtx_VEC_DUPLICATE (mode
, scalar_reg
);
7961 rtx neg_scalar_reg
= gen_rtx_NEG (inner_mode
, scalar_reg
);
7962 rtx series_0_r
= gen_rtx_VEC_SERIES (mode
, const0_rtx
, scalar_reg
);
7963 rtx series_0_nr
= gen_rtx_VEC_SERIES (mode
, const0_rtx
, neg_scalar_reg
);
7964 rtx series_nr_1
= gen_rtx_VEC_SERIES (mode
, neg_scalar_reg
, const1_rtx
);
7965 rtx series_r_m1
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, constm1_rtx
);
7966 rtx series_r_r
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, scalar_reg
);
7967 rtx series_nr_nr
= gen_rtx_VEC_SERIES (mode
, neg_scalar_reg
,
7969 ASSERT_RTX_EQ (series_0_r
,
7970 simplify_unary_operation (NEG
, mode
, series_0_nr
, mode
));
7971 ASSERT_RTX_EQ (series_r_m1
,
7972 simplify_unary_operation (NEG
, mode
, series_nr_1
, mode
));
7973 ASSERT_RTX_EQ (series_r_r
,
7974 simplify_unary_operation (NEG
, mode
, series_nr_nr
, mode
));
7976 /* Test that a VEC_SERIES with a zero step is simplified away. */
7977 ASSERT_RTX_EQ (duplicate
,
7978 simplify_binary_operation (VEC_SERIES
, mode
,
7979 scalar_reg
, const0_rtx
));
7981 /* Test PLUS and MINUS with VEC_SERIES. */
7982 rtx series_0_1
= gen_const_vec_series (mode
, const0_rtx
, const1_rtx
);
7983 rtx series_0_m1
= gen_const_vec_series (mode
, const0_rtx
, constm1_rtx
);
7984 rtx series_r_1
= gen_rtx_VEC_SERIES (mode
, scalar_reg
, const1_rtx
);
7985 ASSERT_RTX_EQ (series_r_r
,
7986 simplify_binary_operation (PLUS
, mode
, series_0_r
,
7988 ASSERT_RTX_EQ (series_r_1
,
7989 simplify_binary_operation (PLUS
, mode
, duplicate
,
7991 ASSERT_RTX_EQ (series_r_m1
,
7992 simplify_binary_operation (PLUS
, mode
, duplicate
,
7994 ASSERT_RTX_EQ (series_0_r
,
7995 simplify_binary_operation (MINUS
, mode
, series_r_r
,
7997 ASSERT_RTX_EQ (series_r_m1
,
7998 simplify_binary_operation (MINUS
, mode
, duplicate
,
8000 ASSERT_RTX_EQ (series_r_1
,
8001 simplify_binary_operation (MINUS
, mode
, duplicate
,
8003 ASSERT_RTX_EQ (series_0_m1
,
8004 simplify_binary_operation (VEC_SERIES
, mode
, const0_rtx
,
8007 /* Test NEG on constant vector series. */
8008 ASSERT_RTX_EQ (series_0_m1
,
8009 simplify_unary_operation (NEG
, mode
, series_0_1
, mode
));
8010 ASSERT_RTX_EQ (series_0_1
,
8011 simplify_unary_operation (NEG
, mode
, series_0_m1
, mode
));

  /* Test PLUS and MINUS on constant vector series.  */
  rtx scalar2 = gen_int_mode (2, inner_mode);
  rtx scalar3 = gen_int_mode (3, inner_mode);
  rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
  rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
  rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (PLUS, mode, series_0_1,
					    CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
					    series_0_m1));
  ASSERT_RTX_EQ (series_1_3,
		 simplify_binary_operation (PLUS, mode, series_1_1,
					    series_0_2));
  ASSERT_RTX_EQ (series_0_1,
		 simplify_binary_operation (MINUS, mode, series_1_1,
					    CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
					    series_0_m1));
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (MINUS, mode, series_1_3,
					    series_0_2));
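  /* For example, the last assertion checks { 1, 4, 7, ... } minus
     { 0, 2, 4, ... }, which is { 1, 2, 3, ... }, i.e. series_1_1.  */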

  /* Test MULT between constant vectors.  */
  rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
  rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
  rtx scalar9 = gen_int_mode (9, inner_mode);
  rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
  ASSERT_RTX_EQ (series_0_2,
		 simplify_binary_operation (MULT, mode, series_0_1, vec2));
  ASSERT_RTX_EQ (series_3_9,
		 simplify_binary_operation (MULT, mode, vec3, series_1_3));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
					     series_0_1));
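  /* The element-wise product of two linear series, e.g.
     { 0, 1, 4, 9, ... }, is not itself a linear series, so for
     variable-length vectors there is no constant the simplifier
     could return.  */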

  /* Test ASHIFT between constant vectors.  */
  ASSERT_RTX_EQ (series_0_2,
		 simplify_binary_operation (ASHIFT, mode, series_0_1,
					    CONST1_RTX (mode)));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
					     series_0_1));
}

/* Wrapper around simplify_context::simplify_merge_mask for the tests
   below.  Given X containing VEC_MERGE expressions controlled by MASK,
   it asks for a form of X that is valid in the lanes where MASK selects
   operand OP (0 or 1); NULL_RTX means no simplification is known.  */

static rtx
simplify_merge_mask (rtx x, rtx mask, int op)
{
  return simplify_context ().simplify_merge_mask (x, mask, op);
}

/* Verify simplify_merge_mask works correctly.  */

static void
test_vec_merge (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx op2 = make_test_reg (mode);
  rtx op3 = make_test_reg (mode);
  rtx op4 = make_test_reg (mode);
  rtx op5 = make_test_reg (mode);
  rtx mask1 = make_test_reg (SImode);
  rtx mask2 = make_test_reg (SImode);
  rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
  rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
  rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
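  /* In (vec_merge:MODE A B M), lane I comes from A when bit I of M is
     set and from B otherwise, so OP 0 corresponds to the "mask true"
     value and OP 1 to the "mask false" value in the calls below.  */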

  /* Simple vec_merge.  */
  ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
  ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));

  /* Nested vec_merge.
     It's tempting to make this simplify right down to opN, but we don't
     because all the simplify_* functions assume that the operands have
     already been simplified.  */
  rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
  ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
  ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));

  /* Intermediate unary op.  */
  rtx unop = gen_rtx_NOT (mode, vm1);
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
		 simplify_merge_mask (unop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
		 simplify_merge_mask (unop, mask1, 1));

  /* Intermediate binary op.  */
  rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
		 simplify_merge_mask (binop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
		 simplify_merge_mask (binop, mask1, 1));

  /* Intermediate ternary op.  */
  rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
		 simplify_merge_mask (tenop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
		 simplify_merge_mask (tenop, mask1, 1));

  /* Side effects.  */
  rtx badop0 = gen_rtx_PRE_INC (mode, op0);
  rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
  ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
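  /* Selecting operand 1 would discard the PRE_INC side effect, so no
     simplification is offered in that direction.  */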

  /* Called indirectly.  */
  ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
		 simplify_rtx (nvm));
}

/* Test subregs of integer vector constant X, trying elements in
   the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
   where NELTS is the number of elements in X.  Subregs involving
   elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail.  */

static void
test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
			   unsigned int first_valid = 0)
{
  machine_mode inner_mode = GET_MODE (x);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);

  for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
    {
      machine_mode outer_mode = (machine_mode) modei;
      if (!VECTOR_MODE_P (outer_mode))
	continue;

      unsigned int outer_nunits;
      if (GET_MODE_INNER (outer_mode) == int_mode
	  && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
	  && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
	{
	  /* Test subregs in which the outer mode is a smaller,
	     constant-sized vector of the same element type.  */
	  unsigned int limit
	    = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
	  for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
	    {
	      rtx expected = NULL_RTX;
	      if (elt >= first_valid)
		{
		  rtx_vector_builder builder (outer_mode, outer_nunits, 1);
		  for (unsigned int i = 0; i < outer_nunits; ++i)
		    builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
		  expected = builder.build ();
		}
	      poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
	      ASSERT_RTX_EQ (expected,
			     simplify_subreg (outer_mode, x,
					      inner_mode, byte));
	    }
	}
      else if (known_eq (GET_MODE_SIZE (outer_mode),
			 GET_MODE_SIZE (inner_mode))
	       && known_eq (elt_bias, 0U)
	       && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
		   || known_eq (GET_MODE_BITSIZE (outer_mode),
				GET_MODE_NUNITS (outer_mode)))
	       && (!FLOAT_MODE_P (outer_mode)
		   || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
		       == GET_MODE_UNIT_PRECISION (outer_mode)))
	       && (GET_MODE_SIZE (inner_mode).is_constant ()
		   || !CONST_VECTOR_STEPPED_P (x)))
	{
	  /* Try converting to OUTER_MODE and back.  */
	  rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
	  ASSERT_TRUE (outer_x != NULL_RTX);
	  ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
					     outer_mode, 0));
	}
    }

  if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
    {
      /* Test each byte in the element range.  */
      unsigned int limit
	= constant_lower_bound (GET_MODE_SIZE (inner_mode));
      for (unsigned int i = 0; i < limit; ++i)
	{
	  unsigned int elt = i / GET_MODE_SIZE (int_mode);
	  rtx expected = NULL_RTX;
	  if (elt >= first_valid)
	    {
	      unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
	      if (BYTES_BIG_ENDIAN)
		byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
	      rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
	      wide_int shifted_elt
		= wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
	      expected = immed_wide_int_const (shifted_elt, QImode);
	    }
	  poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
	  ASSERT_RTX_EQ (expected,
			 simplify_subreg (QImode, x, inner_mode, byte));
	}
    }
}

/* Test constant subregs of integer vector mode INNER_MODE, using 1
   element per pattern.  */

static void
test_vector_subregs_repeating (machine_mode inner_mode)
{
  poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
  unsigned int min_nunits = constant_lower_bound (nunits);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  unsigned int count = gcd (min_nunits, 8);

  rtx_vector_builder builder (inner_mode, count, 1);
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (8 - i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
  if (!nunits.is_constant ())
    test_vector_subregs_modes (x, nunits - min_nunits);
}

/* Test constant subregs of integer vector mode INNER_MODE, using 2
   elements per pattern.  */

static void
test_vector_subregs_fore_back (machine_mode inner_mode)
{
  poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
  unsigned int min_nunits = constant_lower_bound (nunits);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  unsigned int count = gcd (min_nunits, 4);

  rtx_vector_builder builder (inner_mode, count, 2);
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (i, int_mode));
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (-(int) i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
  if (!nunits.is_constant ())
    test_vector_subregs_modes (x, nunits - min_nunits, count);
}

/* Test constant subregs of integer vector mode INNER_MODE, using 3
   elements per pattern.  */

static void
test_vector_subregs_stepped (machine_mode inner_mode)
{
  /* Build { 0, 1, 2, 3, ... }.  */
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  rtx_vector_builder builder (inner_mode, 1, 3);
  for (unsigned int i = 0; i < 3; ++i)
    builder.quick_push (gen_int_mode (i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
}

/* Test constant subregs of integer vector mode INNER_MODE.  */

static void
test_vector_subregs (machine_mode inner_mode)
{
  test_vector_subregs_repeating (inner_mode);
  test_vector_subregs_fore_back (inner_mode);
  test_vector_subregs_stepped (inner_mode);
}

/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
	{
	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
	  test_vector_ops_duplicate (mode, scalar_reg);
	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
	    {
	      test_vector_ops_series (mode, scalar_reg);
	      test_vector_subregs (mode);
	    }
	  test_vec_merge (mode);
	}
    }
}

template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};

/* Test various CONST_POLY_INT properties.  */

template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller arithmetic
     testing is done directly on poly_ints.  */
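  /* As a worked example (assuming coefficients wrap to the mode's
     precision): x1 + x2 = (1 + 1*N) + (-80 + 127*N) = -79 + 128*N, and
     128 wraps to -128 in QImode, matching x3 = poly_int64 (-79, -128).  */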
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}

/* Run all of the selftests within this file.  */

void
simplify_rtx_c_tests ()
{
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}

} // namespace selftest

#endif /* CHECKING_P */