/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
33 #include "diagnostic-core.h"
37 #include "selftest-rtl.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
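
/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (-5) yields
   HOST_WIDE_INT_M1 (all ones) and HWI_SIGN_EXTEND (5) yields
   HOST_WIDE_INT_0, matching two's-complement sign extension of the low
   half into the high half.  */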
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (!HWI_COMPUTABLE_MODE_P (mode)
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
                                           mode);
  return gen_int_mode (val, mode);
}
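
/* For instance, negating (const_int 100) in QImode yields
   (const_int -100), while negating (const_int -128) in QImode wraps
   back to (const_int -128), since 128 is not representable in eight
   bits; gen_int_mode folds the value back into the mode.  */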

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
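
/* For example, in 32-bit SImode the only constant accepted is the one
   whose sole set bit is bit 31 (0x80000000); in 8-bit QImode it is
   (const_int -128), i.e. bit 7.  */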

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
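
/* A worked example for the three predicates above, in 8-bit QImode:
   val_signbit_p is true only when VAL masks to exactly 0x80;
   val_signbit_known_set_p is true for any VAL with bit 7 set (0x80,
   0xff, ...); and val_signbit_known_clear_p is true for any VAL with
   bit 7 clear.  All three return false for modes wider than
   HOST_BITS_PER_WIDE_INT.  */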

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
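
/* An illustrative (hypothetical) use from an optimizer walking
   addresses:

     rtx base = gen_rtx_REG (Pmode, 100);
     rtx sum = simplify_gen_binary (PLUS, Pmode, base, const0_rtx);

   Constant operands are folded outright (here PLUS with 0 simplifies
   to BASE itself); otherwise the operands are put in canonical order
   and a fresh (plus ...) rtx is returned.  */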

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
        return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}
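
/* For example, if X is a (mem/u:DF (symbol_ref ...)) that addresses a
   constant-pool entry holding the DFmode constant 1.0, the function
   returns that CONST_DOUBLE directly, allowing arithmetic on the
   loaded value to be folded.  */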

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      poly_int64 offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep);
            if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
                || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
                || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
              decl = NULL;
            else
              offset += bytepos + toffset_val;
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && VAR_P (decl)
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);
              poly_int64 n_offset, o_offset;

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              n = strip_offset (n, &n_offset);
              o = strip_offset (o, &o_offset);
              if (!(known_eq (o_offset, n_offset + offset)
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && known_eq (offset, 0))
            x = newx;
        }
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
                                         op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
                                            op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
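
/* A sketch of how these wrappers compose: to build X < Y ? X : Y one
   might write

     rtx cmp = simplify_gen_relational (LT, SImode, SImode, x, y);
     rtx min = simplify_gen_ternary (IF_THEN_ELSE, SImode, SImode,
                                     cmp, x, y);

   with each call folding to a constant whenever its operands allow.  */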

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
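
/* Illustrative use: if SRC is (plus:SI (reg:SI 100) (const_int 4)),
   then simplify_replace_rtx (src, reg100, GEN_INT (8)) substitutes and
   folds in one step, returning (const_int 12) rather than
   (plus:SI (const_int 8) (const_int 4)).  (SRC and REG100 are
   hypothetical names.)  */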

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
      && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
          || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
          /* If doing this transform works for an X with all bits set,
             it works for any X.  */
          && ((GET_MODE_MASK (mode) >> shift) & mask)
             == ((GET_MODE_MASK (op_mode) >> shift) & mask)
          && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
          && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
        {
          mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
          return simplify_gen_binary (AND, mode, op0, mask_op);
        }
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            {
              pos -= op_precision - precision;
              return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                           XEXP (op, 1), GEN_INT (pos));
            }
        }
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                         XEXP (op, 1), XEXP (op, 2));
        }
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
        return simplify_gen_unary (TRUNCATE, int_mode, inner,
                                   GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
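
/* For example, under the distribution rule above,
   (truncate:QI (plus:SI (reg:SI 1) (reg:SI 2))) can become
   (plus:QI (truncate:QI (reg:SI 1)) (truncate:QI (reg:SI 2))),
   after which each inner truncation may further degrade into a cheap
   lowpart subreg if the target allows it.  */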

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
         modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
         and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && is_a <scalar_int_mode> (mode, &int_mode)
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
        return simplify_gen_relational (GE, int_mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;
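
      /* E.g. the De Morgan rule above rewrites (not:SI (and:SI x y))
         as (ior:SI (not:SI x) (not:SI y)), exposing the nor/and-not
         patterns that many machines provide.  */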
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If comparison is not reversible use
         x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
               || (GET_CODE (false_rtx) == NEG
                   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
        {
          int_mode = as_a <scalar_int_mode> (mode);
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          gen_int_shift_amount (inner,
                                                                isize - 1));
              if (int_mode == inner)
                return temp;
              if (GET_MODE_PRECISION (int_mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          gen_int_shift_amount (inner,
                                                                isize - 1));
              if (int_mode == inner)
                return temp;
              if (GET_MODE_PRECISION (int_mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
            }
        }

      if (vec_series_p (op, &base, &step))
        {
          /* Only create a new series if we can simplify both parts.  In other
             cases this isn't really a simplification, and it's not necessarily
             a win to replace a vector operation with a scalar operation.  */
          scalar_mode inner_mode = GET_MODE_INNER (mode);
          base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
          if (base)
            {
              step = simplify_unary_operation (NEG, inner_mode,
                                               step, inner_mode);
              if (step)
                return gen_vec_series (mode, base, step);
            }
        }
      break;
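
      /* E.g. the rules above turn (neg:SI (plus:SI x (const_int 1)))
         into (not:SI x) and (neg:SI (not:SI x)) into
         (plus:SI x (const_int 1)), both instances of the identity
         -x == ~x + 1.  */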
    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (known_eq (GET_MODE_NUNITS (mode), 1)
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_UNIT_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
          && (num_sign_bit_copies (op, int_mode)
              == GET_MODE_PRECISION (int_mode)))
        return gen_rtx_NEG (int_mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = (GET_MODE_UNIT_PRECISION (lmode)
                        - INTVAL (XEXP (lhs, 1)));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += (GET_MODE_UNIT_PRECISION (rmode)
                         - INTVAL (XEXP (rhs, 1)));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_UNIT_PRECISION (mode)
                      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          gcc_assert (GET_MODE_BITSIZE (int_mode)
                      > GET_MODE_BITSIZE (op_mode));
          if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           int_mode, inner, tmode);
            }
        }

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (op, 1) != const0_rtx)
        return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;
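
      /* E.g. the widening-multiply rule above turns
         (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
                                  (sign_extend:SI y:HI)))
         into (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)):
         a 16x16->32 bit product cannot overflow, so widening the
         multiplication itself is exact.  */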
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = (GET_MODE_UNIT_PRECISION (lmode)
                        - INTVAL (XEXP (lhs, 1)));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += (GET_MODE_UNIT_PRECISION (rmode)
                         - INTVAL (XEXP (rhs, 1)));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, int_mode,
                                           inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
          && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), op0_mode)
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
                                     op0_mode);
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;
    default:
      break;
    }

  if (VECTOR_MODE_P (mode)
      && vec_duplicate_p (op, &elt)
      && code != VEC_DUPLICATE)
    {
      /* Try applying the operator to ELT and see if that simplifies.
         We can duplicate the result if so.

         The reason we don't use simplify_gen_unary is that it isn't
         necessarily a win to convert things like:

           (neg:V (vec_duplicate:V (reg:S R)))

         to:

           (vec_duplicate:V (neg:S (reg:S R)))

         The first might be done entirely in vector registers while the
         second might need a move between register files.  */
      temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                       elt, GET_MODE_INNER (GET_MODE (op)));
      if (temp)
        return gen_vec_duplicate (mode, temp);
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
                                rtx op, machine_mode op_mode)
{
  scalar_int_mode result_mode;

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
        return gen_const_vec_duplicate (mode, op);
      unsigned int n_elts;
      if (GET_CODE (op) == CONST_VECTOR
          && GET_MODE_NUNITS (mode).is_constant (&n_elts))
        {
          /* This must be constant if we're duplicating it to a constant
             number of elements.  */
          unsigned int in_n_elts = CONST_VECTOR_NUNITS (op).to_constant ();
          gcc_assert (in_n_elts < n_elts);
          gcc_assert ((n_elts % in_n_elts) == 0);
          rtvec v = rtvec_alloc (n_elts);
          for (unsigned i = 0; i < n_elts; i++)
            RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      unsigned int n_elts;
      if (!CONST_VECTOR_NUNITS (op).is_constant (&n_elts))
        return NULL_RTX;

      machine_mode opmode = GET_MODE (op);
      gcc_assert (known_eq (GET_MODE_NUNITS (mode), n_elts));
      gcc_assert (known_eq (GET_MODE_NUNITS (opmode), n_elts));

      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x || !valid_for_const_vector_p (mode, x))
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
        return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
        return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      wide_int result;
      scalar_int_mode imode = (op_mode == VOIDmode
                               ? result_mode
                               : as_a <scalar_int_mode> (op_mode));
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE but a lot of
         upstream callers expect that this function never fails to
         simplify something and so you if you added this to the test
         above the code would die later anyway.  If this assert
         happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
        {
        case NOT:
          result = wi::bit_not (op0);
          break;

        case NEG:
          result = wi::neg (op0);
          break;

        case ABS:
          result = wi::abs (op0);
          break;

        case FFS:
          result = wi::shwi (wi::ffs (op0), result_mode);
          break;

        case CLZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::clz (op0);
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
            return NULL_RTX;
          result = wi::shwi (int_value, result_mode);
          break;

        case CLRSB:
          result = wi::shwi (wi::clrsb (op0), result_mode);
          break;

        case CTZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::ctz (op0);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
            return NULL_RTX;
          result = wi::shwi (int_value, result_mode);
          break;

        case POPCOUNT:
          result = wi::shwi (wi::popcount (op0), result_mode);
          break;

        case PARITY:
          result = wi::shwi (wi::parity (op0), result_mode);
          break;

        case BSWAP:
          result = wide_int (op0).bswap ();
          break;

        case TRUNCATE:
        case ZERO_EXTEND:
          result = wide_int::from (op0, width, UNSIGNED);
          break;

        case SIGN_EXTEND:
          result = wide_int::from (op0, width, SIGNED);
          break;

        case SQRT:
        default:
          return 0;
        }

      return immed_wide_int_const (result, result_mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);

      switch (code)
        {
        case SQRT:
          return 0;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }
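
  /* For example (editorial illustration): folding
     (fix:QI (const_double:SF 300.0)) yields (const_int 127), the QImode
     signed maximum, because 300.0 fails the upper-bound test above;
     (unsigned_fix:QI (const_double:SF -4.0)) yields (const_int 0)
     because the operand is negative.  */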
  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
	{
	case NEG:
	  result = -const_poly_int_value (op);
	  break;

	case NOT:
	  result = ~const_poly_int_value (op);
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
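
/* For instance (editorial illustration), in SImode the first rule rewrites
     (and (bswap x) (const_int 0xff))
   as
     (bswap (and x (const_int 0xff000000)))
   since byte-swapping the constant mask commutes with the bitwise AND.  */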
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
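
/* As an illustration (editorial, with CODE == MULT): for
   (mult (mult x (const_int 4)) (const_int 2)) the inner constant
   subexpression (mult (const_int 4) (const_int 2)) simplifies, so the
   "(a op b) op c" -> "a op (b op c)" attempt above succeeds and the
   whole expression becomes (mult x (const_int 8)).  */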
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
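
/* A typical use (editorial illustration) is constant folding in a pass
   such as combine:
     simplify_binary_operation (PLUS, SImode, const1_rtx, const1_rtx)
   returns (const_int 2), while a call that cannot be simplified returns
   NULL_RTX and the caller keeps the original expression.  */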
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

static rtx
simplify_binary_operation_series (rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
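
/* For example (editorial illustration): adding the series
   {1, 3, 5, ...} to the duplicate {10, 10, 10, ...}, i.e.
     (plus (vec_series (const_int 1) (const_int 2))
	   (vec_duplicate (const_int 10)))
   simplifies to (vec_series (const_int 11) (const_int 2)), because both
   the bases and the steps fold to constants.  */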
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;
  poly_int64 offset;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, offset);
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && poly_int_rtx_p (op0, &offset))
	return plus_constant (mode, op1, offset);

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);

	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
	      return (set_src_cost (tem, int_mode, speed)
		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
	    }
	}
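
      /* A concrete instance of the distribution above (editorial
	 illustration): for (plus (mult x (const_int 3)) x) both sides
	 reduce to X with coefficients 3 and 1, giving
	 (mult x (const_int 4)); likewise (plus (ashift x (const_int 2)) x)
	 gives (mult x (const_int 5)), provided the new multiply is not
	 costlier than the original expression.  */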
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  tem = simplify_binary_operation_series (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
	    return xop00;

	  if (REG_P (xop00) && REG_P (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE (xop00) == mode
	      && GET_MODE (xop10) == mode
	      && GET_MODE_CLASS (mode) == MODE_CC)
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
	 constants, in which case not retaining additions and
	 subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
	  && !contains_symbolic_reference_p (op1))
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;
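
      /* For instance (editorial illustration), in two's complement
	 (minus (const_int -1) a) is rewritten as (not a) by the rule
	 above: with a == 5, -1 - 5 == -6 == ~5.  */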
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (int_mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);

	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
	      return (set_src_cost (tem, int_mode, speed)
		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
	    }
	}
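
      /* Here the MINUS variant folds e.g. (minus (mult x (const_int 3)) x)
	 to (mult x (const_int 2)) (editorial illustration): coeff0 is 3
	 and negcoeff1 is -1, so the new coefficient is 3 + (-1) == 2.  */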
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  tem = simplify_binary_operation_series (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signaling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0,
					gen_int_shift_amount (mode, val));
	}

      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	  if (real_equal (d1, &dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && real_equal (d1, &dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
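
      /* Examples of the MULT rules above (editorial illustration):
	 (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3));
	 for floats, (mult:DF x 2.0) becomes (plus:DF x x), and
	 (mult:DF x -1.0) becomes (neg:DF x) when signaling NaNs need
	 not be honored.  */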
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
	}

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_UNIT_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
				     &inner_mode)
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
	  && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
	      + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      /* The following happens with bitfield merging.
	 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
      if (GET_CODE (op0) == AND
	  && GET_CODE (op1) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (XEXP (op1, 1))
	  && (INTVAL (XEXP (op0, 1))
	      == ~INTVAL (XEXP (op1, 1))))
	{
	  /* The IOR may be on both sides.  */
	  rtx top0 = NULL_RTX, top1 = NULL_RTX;
	  if (GET_CODE (XEXP (op1, 0)) == IOR)
	    top0 = op0, top1 = op1;
	  else if (GET_CODE (XEXP (op0, 0)) == IOR)
	    top0 = op1, top1 = op0;
	  if (top0 && top1)
	    {
	      /* X may be on either side of the inner IOR.  */
	      rtx tem = NULL_RTX;
	      if (rtx_equal_p (XEXP (top0, 0),
			       XEXP (XEXP (top1, 0), 0)))
		tem = XEXP (XEXP (top1, 0), 1);
	      else if (rtx_equal_p (XEXP (top0, 0),
				    XEXP (XEXP (top1, 0), 1)))
		tem = XEXP (XEXP (top1, 0), 0);
	      if (tem)
		return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
					    simplify_gen_binary
					      (AND, mode, tem,
					       XEXP (top1, 1)));
	    }
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
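
      /* The rotate recognition above turns, in SImode,
	 (ior (ashift x (const_int 8)) (lshiftrt x (const_int 24)))
	 into (rotate x (const_int 8)), since 8 + 24 equals the mode
	 precision (editorial illustration).  */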
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode,
							  op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
	 out bits inverted twice and not set by C.  Similarly, given
	 (xor (and (xor A B) C) D), simplify without inverting C in
	 the xor operand: (xor (and A C) (B&C)^D).
      */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (op1)
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
	{
	  enum rtx_code op = GET_CODE (op0);
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx d = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);
	  HOST_WIDE_INT dval = INTVAL (d);
	  HOST_WIDE_INT xcval;

	  if (op == IOR)
	    xcval = ~cval;
	  else
	    xcval = cval;

	  return simplify_gen_binary (XOR, mode,
				      simplify_gen_binary (op, mode, a, c),
				      gen_int_mode ((bval & xcval) ^ dval,
						    mode));
	}

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  /* Instead of computing ~A&C, we compute its negated value,
	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
						  GEN_INT (~cval));

	  if ((~cval & bval) == 0)
	    {
	      rtx na_c = NULL_RTX;
	      if (n_na_c)
		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
	      else
		{
		  /* If ~A does not simplify, don't bother: we don't
		     want to simplify 2 operations into 3, and if na_c
		     were to simplify with na, n_na_c would have
		     simplified as well.  */
		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
		  if (na)
		    na_c = simplify_gen_binary (AND, mode, na, c);
		}

	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval,
							  mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (n_na_c == CONSTM1_RTX (mode))
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}

      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
	 do (ior (and A ~C) (and B C)) which is a machine instruction on some
	 machines, and also has shorter instruction path length.  */
      if (GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && CONST_INT_P (XEXP (op0, 1))
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
	{
	  rtx a = trueop1;
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
	  rtx bc = simplify_gen_binary (AND, mode, b, c);
	  return simplify_gen_binary (IOR, mode, a_nc, bc);
	}
      /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C))  */
      else if (GET_CODE (op0) == AND
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (XEXP (op0, 1))
	       && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
	{
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = trueop1;
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
	  rtx ac = simplify_gen_binary (AND, mode, a, c);
	  return simplify_gen_binary (IOR, mode, ac, b_nc);
	}

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, int_mode)))
	return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
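
      /* Sample XOR folds from above (editorial illustration):
	 (xor (not x) (not y)) becomes (xor x y), and in QImode an XOR
	 with the sign-bit constant is canonicalized to a PLUS of the
	 same constant, since adding the sign bit flips only that bit.  */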
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0),
							   op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}

      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 1)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 1)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
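
      /* Typical AND folds from above (editorial illustration):
	 (and x (const_int 0)) is 0 when x has no side effects, and
	 (and (ior x y) x) reduces to x by the "(A | B) & A -> A" rule.  */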
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode)
	  && !cfun->can_throw_non_call_exceptions)
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0,
				    gen_int_shift_amount (mode, val));
      break;
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	      /* x/-1.0 is -x.  */
	      if (real_equal (d1, &dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !real_equal (d1, &dconst0))
		{
		  REAL_VALUE_TYPE d;
		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
		  tem = const_double_from_real_value (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (tem)
		return tem;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (x)
		return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (UINTVAL (trueop1) - 1,
						  mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
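
      /* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)) by the
	 power-of-two rule above, and 0 % x folds to 0 outright when x has
	 no side effects (editorial illustration).  */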
    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_UNIT_PRECISION (mode) - 1))
	{
	  int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
	  rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
	  return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				      mode, op0, new_amount_rtx);
	}
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

    canonicalize_shift:
      /* Given:
	 scalar modes M1, M2
	 scalar constants c1, c2
	 size (M2) > size (M1)
	 c1 == size (M2) - size (M1)
	 optimize:
	 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
				 <low_part>)
		      (const_int <c2>))
	 to:
	 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
		    <low_part>).  */
      if ((code == ASHIFTRT || code == LSHIFTRT)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && SUBREG_P (op0)
	  && CONST_INT_P (op1)
	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
				     &inner_mode)
	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
	  && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
	      == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
	  && subreg_lowpart_p (op0))
	{
	  rtx tmp = gen_int_shift_amount
	    (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
	  tmp = simplify_gen_binary (code, inner_mode,
				     XEXP (SUBREG_REG (op0), 0),
				     tmp);
	  return lowpart_subreg (int_mode, tmp, inner_mode);
	}

      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0,
					gen_int_shift_amount (mode, val));
	}
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
	{
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
	      && zero_val == GET_MODE_PRECISION (inner_mode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, inner_mode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
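
      /* Shift and rotate examples (editorial illustration): on targets
	 providing both rotate insns, SImode (rotate x (const_int 24)) is
	 canonicalized to (rotatert x (const_int 8)); and with
	 SHIFT_COUNT_TRUNCATED, an out-of-range count such as
	 (ashift:SI x (const_int 33)) is reduced to
	 (ashift:SI x (const_int 1)).  */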
    case SMIN:
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;
    case VEC_SERIES:
      if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
	return gen_vec_duplicate (mode, op0);
      if (valid_for_const_vector_p (mode, op0)
	  && valid_for_const_vector_p (mode, op1))
	return gen_const_vec_series (mode, op0, op1);
      return 0;
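
      /* E.g. (vec_series x (const_int 0)) becomes (vec_duplicate x), and
	 a series with constant base and step becomes a constant vector
	 (editorial illustration).  */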
3633 if (!VECTOR_MODE_P (mode
))
3635 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3636 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3637 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3638 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3640 /* We can't reason about selections made at runtime. */
3641 if (!CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3644 if (vec_duplicate_p (trueop0
, &elt0
))
3647 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3648 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3651 /* Extract a scalar element from a nested VEC_SELECT expression
3652 (with optional nested VEC_CONCAT expression). Some targets
3653 (i386) extract scalar element from a vector using chain of
3654 nested VEC_SELECT expressions. When input operand is a memory
3655 operand, this operation can be simplified to a simple scalar
3656 load from an offseted memory address. */
3658 if (GET_CODE (trueop0
) == VEC_SELECT
3659 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3660 .is_constant (&n_elts
)))
3662 rtx op0
= XEXP (trueop0
, 0);
3663 rtx op1
= XEXP (trueop0
, 1);
3665 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3671 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3672 gcc_assert (i
< n_elts
);
3674 /* Select element, pointed by nested selector. */
3675 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3677 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3678 if (GET_CODE (op0
) == VEC_CONCAT
)
3680 rtx op00
= XEXP (op0
, 0);
3681 rtx op01
= XEXP (op0
, 1);
3683 machine_mode mode00
, mode01
;
3684 int n_elts00
, n_elts01
;
3686 mode00
= GET_MODE (op00
);
3687 mode01
= GET_MODE (op01
);
3689 /* Find out the number of elements of each operand.
3690 Since the concatenated result has a constant number
3691 of elements, the operands must too. */
3692 n_elts00
= GET_MODE_NUNITS (mode00
).to_constant ();
3693 n_elts01
= GET_MODE_NUNITS (mode01
).to_constant ();
3695 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3697 /* Select correct operand of VEC_CONCAT
3698 and adjust selector. */
3699 if (elem
< n_elts01
)
3710 vec
= rtvec_alloc (1);
3711 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3713 tmp
= gen_rtx_fmt_ee (code
, mode
,
3714 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3720 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3721 gcc_assert (GET_MODE_INNER (mode
)
3722 == GET_MODE_INNER (GET_MODE (trueop0
)));
3723 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3725 if (vec_duplicate_p (trueop0
, &elt0
))
3726 /* It doesn't matter which elements are selected by trueop1,
3727 because they are all the same. */
3728 return gen_vec_duplicate (mode
, elt0
);
3730 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3732 unsigned n_elts
= XVECLEN (trueop1
, 0);
3733 rtvec v
= rtvec_alloc (n_elts
);
3736 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
3737 for (i
= 0; i
< n_elts
; i
++)
3739 rtx x
= XVECEXP (trueop1
, 0, i
);
3741 if (!CONST_INT_P (x
))
3744 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3748 return gen_rtx_CONST_VECTOR (mode
, v
);
3751 /* Recognize the identity. */
3752 if (GET_MODE (trueop0
) == mode
)
3754 bool maybe_ident
= true;
3755 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3757 rtx j
= XVECEXP (trueop1
, 0, i
);
3758 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3760 maybe_ident
= false;
3768 /* If we build {a,b} then permute it, build the result directly. */
3769 if (XVECLEN (trueop1
, 0) == 2
3770 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3771 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3772 && GET_CODE (trueop0
) == VEC_CONCAT
3773 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3774 && GET_MODE (XEXP (trueop0
, 0)) == mode
3775 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3776 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3778 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3779 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3782 gcc_assert (i0
< 4 && i1
< 4);
3783 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3784 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3786 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
      if (XVECLEN (trueop1, 0) == 2
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	  && GET_CODE (trueop0) == VEC_CONCAT
	  && GET_MODE (trueop0) == mode)
	{
	  unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	  unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	  rtx subop0, subop1;

	  gcc_assert (i0 < 2 && i1 < 2);
	  subop0 = XEXP (trueop0, i0);
	  subop1 = XEXP (trueop0, i1);

	  return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	}

      /* If we select one half of a vec_concat, return that.  */
      int l0, l1;
      if (GET_CODE (trueop0) == VEC_CONCAT
	  && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
	      .is_constant (&l0))
	  && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
	      .is_constant (&l1))
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
	{
	  rtx subop0 = XEXP (trueop0, 0);
	  rtx subop1 = XEXP (trueop0, 1);
	  machine_mode mode0 = GET_MODE (subop0);
	  machine_mode mode1 = GET_MODE (subop1);
	  int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	  if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
	    {
	      bool success = true;
	      for (int i = 1; i < l0; ++i)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (j) || INTVAL (j) != i)
		    {
		      success = false;
		      break;
		    }
		}
	      if (success)
		return subop0;
	    }
	  if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
	    {
	      bool success = true;
	      for (int i = 1; i < l1; ++i)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
		    {
		      success = false;
		      break;
		    }
		}
	      if (success)
		return subop1;
	    }
	}

      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  poly_int64 offset
	    = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      poly_int64 vec_size;

	      if (CONST_INT_P (XEXP (vec, 0)))
		{
		  /* vec_concat of two const_ints doesn't make sense with
		     respect to modes.  */
		  if (CONST_INT_P (XEXP (vec, 1)))
		    return 0;

		  vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
			     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
		}
	      else
		vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

	      if (known_lt (offset, vec_size))
		vec = XEXP (vec, 0);
	      else if (known_ge (offset, vec_size))
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      else
		break;

	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}
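      /* Worked example (operands invented): selecting element 3 of
	 (vec_concat:V4SI (reg:V2SI x)
			  (vec_concat:V2SI (reg:SI a) (reg:SI b)))
	 starts at byte offset 12; the walk skips the 8 bytes of X, then
	 the 4 bytes of A, and returns B once the remaining expression
	 has the requested mode.  */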
      /* If we select elements in a vec_merge that all come from the same
	 operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
	{
	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
	  if (CONST_INT_P (trueop02))
	    {
	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
	      bool all_operand0 = true;
	      bool all_operand1 = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
		    all_operand1 = false;
		  else
		    all_operand0 = false;
		}
	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
		return simplify_gen_binary (VEC_SELECT, mode,
					    XEXP (op0, 0), op1);
	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
		return simplify_gen_binary (VEC_SELECT, mode,
					    XEXP (op0, 1), op1);
	    }
	}

      /* If we have two nested selects that are inverses of each
	 other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
	  && GET_MODE (XEXP (trueop0, 0)) == mode)
	{
	  rtx op0_subop1 = XEXP (trueop0, 1);
	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
	  gcc_assert (known_eq (XVECLEN (trueop1, 0),
				GET_MODE_NUNITS (mode)));

	  /* Apply the outer ordering vector to the inner one.  (The inner
	     ordering vector is expressly permitted to be of a different
	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
	     then the two VEC_SELECTs cancel.  */
	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
	    {
	      rtx x = XVECEXP (trueop1, 0, i);
	      if (!CONST_INT_P (x))
		return 0;
	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
	      if (!CONST_INT_P (y) || i != INTVAL (y))
		return 0;
	    }
	  return XEXP (trueop0, 0);
	}
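      /* In permutation terms (illustrative): if the inner select applies
	 permutation P and the outer one applies Q, the loop verifies
	 P (Q (i)) == i for every lane I, i.e. that Q inverts P, before
	 discarding both selects.  */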
      return 0;

    case VEC_CONCAT:
      {
	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				 ? GET_MODE (trueop0)
				 : GET_MODE_INNER (mode));
	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				 ? GET_MODE (trueop1)
				 : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
			      + GET_MODE_SIZE (op1_mode),
			      GET_MODE_SIZE (mode)));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	unsigned int n_elts, in_n_elts;
	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_SCALAR_INT_P (trueop0)
	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_SCALAR_INT_P (trueop1)
		|| CONST_DOUBLE_AS_FLOAT_P (trueop1))
	    && GET_MODE_NUNITS (mode).is_constant (&n_elts)
	    && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
	  {
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;

	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }

	/* Try to merge two VEC_SELECTs from the same vector into a single one.
	   Restrict the transformation to avoid generating a VEC_SELECT with a
	   mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }
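	/* E.g. (lanes invented): (vec_concat (vec_select x [0 1])
	   (vec_select x [3 2])) becomes (vec_select x [0 1 3 2]),
	   a single shuffle instead of two selects plus a concat.  */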
      }
      return 0;

    default:
      break;
    }

  if (mode == GET_MODE (op0)
      && mode == GET_MODE (op1)
      && vec_duplicate_p (op0, &elt0)
      && vec_duplicate_p (op1, &elt1))
    {
      /* Try applying the operator to ELT and see if that simplifies.
	 We can duplicate the result if so.

	 The reason we don't use simplify_gen_binary is that it isn't
	 necessarily a win to convert things like:

	   (plus:V (vec_duplicate:V (reg:S R1))
		   (vec_duplicate:V (reg:S R2)))

	 to:

	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))

	 The first might be done entirely in vector registers while the
	 second might need a move between register files.  */
      tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
				       elt0, elt1);
      if (tem)
	return gen_vec_duplicate (mode, tem);
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned int n_elts;
      if (!CONST_VECTOR_NUNITS (op0).is_constant (&n_elts))
	return NULL_RTX;

      gcc_assert (known_eq (n_elts, CONST_VECTOR_NUNITS (op1)));
      gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x || !valid_for_const_vector_p (mode, x))
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || CONST_FIXED_P (op0)
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || CONST_FIXED_P (op1)))
    {
      /* Both inputs have a constant number of elements, so the result
	 must too.  */
      unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts
	    = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
	  unsigned op1_n_elts
	    = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = CONST_VECTOR_ELT (op1, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND || code == IOR || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    switch (code)
	      {
	      case AND:
		tmp0[i] &= tmp1[i];
		break;
	      case IOR:
		tmp0[i] |= tmp1[i];
		break;
	      case XOR:
		tmp0[i] ^= tmp1[i];
		break;
	      default:
		gcc_unreachable ();
	      }
	  real_from_target (&r, tmp0, mode);
	  return const_double_from_real_value (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  const REAL_VALUE_TYPE *opr0, *opr1;
	  bool inexact;

	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
		  || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
	    return 0;

	  real_convert (&f0, mode, opr0);
	  real_convert (&f1, mode, opr1);

	  if (code == DIV
	      && real_equal (&f1, &dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && real_equal (&f0, &dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */
	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */
	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return const_double_from_real_value (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      wi::overflow_type overflow;
      rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
      rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something and so if you added this to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
	    else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }

	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, int_mode);
    }
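  /* A worked case (values invented): (plus:SI (const_int 3) (const_int 4))
     folds via wi::add to (const_int 7), while
     (ashift:SI (const_int 1) (const_int 33)) is refused unless
     SHIFT_COUNT_TRUNCATED reduces the count below the 32-bit precision,
     rather than folding to an out-of-range 1<<33.  */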
  /* Handle polynomial integers.  */
  if (NUM_POLY_INT_COEFFS > 1
      && is_a <scalar_int_mode> (mode, &int_mode)
      && poly_int_rtx_p (op0)
      && poly_int_rtx_p (op1))
    {
      poly_wide_int result;
      switch (code)
	{
	case PLUS:
	  result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
	  break;

	case MINUS:
	  result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
	  break;

	case MULT:
	  if (CONST_SCALAR_INT_P (op1))
	    result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
	  else
	    return NULL_RTX;
	  break;

	case ASHIFT:
	  if (CONST_SCALAR_INT_P (op1))
	    {
	      wide_int shift = rtx_mode_t (op1, mode);
	      if (SHIFT_COUNT_TRUNCATED)
		shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
	      else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
		return NULL_RTX;
	      result = wi::to_poly_wide (op0, mode) << shift;
	    }
	  else
	    return NULL_RTX;
	  break;

	case IOR:
	  if (!CONST_SCALAR_INT_P (op1)
	      || !can_ior_p (wi::to_poly_wide (op0, mode),
			     rtx_mode_t (op1, mode), &result))
	    return NULL_RTX;
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, int_mode);
    }

  return NULL_RTX;
}
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
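/* Illustratively: operands with low commutative-operand precedence, such
   as constants, sort after registers, which is how simplify_plus_minus
   can rely on any CONST_INT ending up last in its ops array; ties
   between two REGs fall back to REGNO order and report only 0 or 1.  */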
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */
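/* A made-up example of the scheme: (minus (plus a b) (neg c)) is
   flattened into the ops array as { a:+, b:+, c:+ } -- the MINUS
   negates its second argument and the NEG cancels that negation --
   after which sorting and pairwise simplification rebuild a canonical
   (plus (plus a b) c).  */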
static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      /* If this operand was negated then we will potentially
		 canonicalize the expression.  Similarly if we don't
		 place the operands adjacent we're re-ordering the
		 expression and thus might be performing a
		 canonicalization.  Ignore register re-ordering.
		 ??? It might be better to shuffle the ops array here,
		 but then (plus (plus (A, B), plus (C, D))) wouldn't
		 be seen as non-canonical.  */
	      if (this_neg
		  || (i != n_ops - 2
		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
		canonicalized = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int cmp;

	  j = i - 1;
	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
	  if (cmp <= 0)
	    continue;
	  /* Just swapping registers doesn't count as canonicalization.  */
	  if (cmp != 1)
	    canonicalized = 1;

	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j--
		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }

  /* If nothing changed, check that rematerialization of rtl instructions
     is still required.  */
  if (!canonicalized)
    {
      /* Perform rematerialization if only all operands are registers and
	 all operations are PLUS.  */
      /* ??? Also disallow (non-global, non-frame) fixed registers to work
	 around rs6000 and how it uses the CA register.  See PR67145.  */
      for (i = 0; i < n_ops; i++)
	if (ops[i].neg
	    || !REG_P (ops[i].op)
	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
		&& fixed_regs[REGNO (ops[i].op)]
		&& !global_regs[REGNO (ops[i].op)]
		&& ops[i].op != frame_pointer_rtx
		&& ops[i].op != arg_pointer_rtx
		&& ops[i].op != stack_pointer_rtx))
	  return NULL_RTX;
      goto gen_result;
    }

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      if (CONST_INT_P (value))
	{
	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					     INTVAL (value));
	  n_ops--;
	}
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
 gen_result:
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return const_double_from_real_value (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    return gen_const_vec_duplicate (mode, val);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
     transformed into (LTU a -C).  */
  if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
      && CONST_INT_P (XEXP (op0, 1))
      && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational (LTU, mode, cmp_mode,
				      XEXP (op0, 0), new_cmp);
    }
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  scalar_int_mode int_mode, int_cmp_mode;
  if (code == NE
      && op1 == const0_rtx
      && is_int_mode (mode, &int_mode)
      && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && int_mode != BImode
      && int_cmp_mode != BImode
      && nonzero_bits (op0, int_cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
	   : lowpart_subreg (int_mode, op0, int_cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
  /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction
     or constant folding if x/y is a constant.  */
  if ((code == EQ || code == NE)
      && (op0code == AND || op0code == IOR)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
	 (eq/ne (and (not y) x) 0).  */
      if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
	{
	  rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
					  cmp_mode);
	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
					  CONST0_RTX (cmp_mode));
	}

      /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
	 (eq/ne (and (not x) y) 0).  */
      if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
	{
	  rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
					  cmp_mode);
	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
					  CONST0_RTX (cmp_mode));
	}
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */
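/* For example (constants invented): asked about
   (gtu (const_int 3) (const_int 5)), the code below compares the two
   values once, records CMP_LT | CMP_LTU, and comparison_result maps
   that through GTU to const0_rtx -- a contradiction.  */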
rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
      rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
  /* Optimize comparisons with upper and lower bounds.  */
  scalar_int_mode int_mode;
  if (CONST_INT_P (trueop1)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && HWI_COMPUTABLE_MODE_P (int_mode)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (mmin < 0)
	    {
	      unsigned int sign_copies
		= num_sign_bit_copies (trueop0, int_mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
  /* Optimize integer comparisons with zero.  */
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && trueop1 == const0_rtx
      && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & (HOST_WIDE_INT_1U << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
	    return const0_rtx;
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
	    return const_true_rtx;
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the
   expression can be simplified to that or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */
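/* Concretely (made-up target values): on a target whose
   CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode, the RTL form of
   "x == 0 ? 32 : clz (x)" collapses to a bare (clz:SI x), since the
   guard only re-states what the target already defines at zero.  */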
static rtx
simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
{
  if (cmp_code != EQ && cmp_code != NE)
    return NULL_RTX;

  /* Result on X == 0 and X != 0 respectively.  */
  rtx on_zero, on_nonzero;
  if (cmp_code == EQ)
    {
      on_zero = true_val;
      on_nonzero = false_val;
    }
  else
    {
      on_zero = false_val;
      on_nonzero = true_val;
    }

  rtx_code op_code = GET_CODE (on_nonzero);
  if ((op_code != CLZ && op_code != CTZ)
      || !rtx_equal_p (XEXP (on_nonzero, 0), x)
      || !CONST_INT_P (on_zero))
    return NULL_RTX;

  HOST_WIDE_INT op_val;
  scalar_int_mode mode ATTRIBUTE_UNUSED
    = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
  if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
       || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
      && op_val == INTVAL (on_zero))
    return on_nonzero;

  return NULL_RTX;
}
/* Try to simplify X given that it appears within operand OP of a
   VEC_MERGE operation whose mask is MASK.  X need not use the same
   vector mode as the VEC_MERGE, but it must have the same number of
   elements.

   Return the simplified X on success, otherwise return NULL_RTX.  */
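/* Sketch of the idea: inside (vec_merge A B M), an operand-0
   subexpression that is itself (vec_merge C D M) with the same mask
   can only contribute C's lanes, so it may be rewritten as plain C;
   the recursion below pushes that fact through unary, binary and
   ternary vector operations.  */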
rtx
simplify_merge_mask (rtx x, rtx mask, int op)
{
  gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
  poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
  if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
    {
      if (side_effects_p (XEXP (x, 1 - op)))
	return NULL_RTX;

      return XEXP (x, op);
    }
  if (UNARY_P (x)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      if (top0)
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
				   GET_MODE (XEXP (x, 0)));
    }
  if (BINARY_P (x)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
      if (top0 || top1)
	{
	  if (COMPARISON_P (x))
	    return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					    GET_MODE (XEXP (x, 0)) != VOIDmode
					    ? GET_MODE (XEXP (x, 0))
					    : GET_MODE (XEXP (x, 1)),
					    top0 ? top0 : XEXP (x, 0),
					    top1 ? top1 : XEXP (x, 1));
	  else
	    return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
					top0 ? top0 : XEXP (x, 0),
					top1 ? top1 : XEXP (x, 1));
	}
    }
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
      rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
      if (top0 || top1 || top2)
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (XEXP (x, 0)),
				     top0 ? top0 : XEXP (x, 0),
				     top1 ? top1 : XEXP (x, 1),
				     top2 ? top2 : XEXP (x, 2));
    }

  return NULL_RTX;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  bool any_change = false;
  rtx tem, trueop2;
  scalar_int_mode int_mode, int_op0_mode;
  unsigned int n_elts;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
	  && HWI_COMPUTABLE_MODE_P (int_mode))
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (!BITS_BIG_ENDIAN)
	    val >>= op2val;
	  else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
	    val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
	  else
	    /* Not enough information to calculate the bit position.  */
	    break;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
		     != 0)
		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
	    }

	  return gen_int_mode (val, int_mode);
	}
      break;
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      /* Convert (!c) != {0,...,0} ? a : b into
	 c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits;
	  bool ok = true;
	  if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
	    ok = false;
	  else
	    for (int i = 0; i < nunits; ++i)
	      if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
		{
		  ok = false;
		  break;
		}
	  if (ok)
	    {
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}

      /* Convert x == 0 ? N : clz (x) into clz (x) when
	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
	 Similarly for ctz (x).  */
      if (COMPARISON_P (op0) && !side_effects_p (op0)
	  && XEXP (op0, 1) == const0_rtx)
	{
	  rtx simplified
	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
				     op1, op2);
	  if (simplified)
	    return simplified;
	}

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2)
	  && GET_MODE_NUNITS (mode).is_constant (&n_elts))
	{
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
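	  /* Bit I of SEL picks lane I from op0: with invented values
	     n_elts == 4 and sel == 5 (0b0101), lanes 0 and 2 come from
	     op0 and lanes 1 and 3 from op1, so merging [a0 a1 a2 a3]
	     with [b0 b1 b2 b3] yields [a0 b1 a2 b3].  */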
	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i)))
			        a
			        (const_int (1 << i)))
	     with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
	     (const_int N))
	     with (vec_concat (X) (B)) if N == 1 or
	     (vec_concat (A) (X)) if N == 2.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == CONST_VECTOR
	      && known_eq (CONST_VECTOR_NUNITS (op1), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }

	  /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z))
			        (const_int N))
	     with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
	     Only applies for vectors of two elements.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == VEC_CONCAT
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = XEXP (op1, 2 - sel);
	      rtx otherop = XEXP (op1, sel - 1);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      /* Don't want to throw away the other part of the vec_concat if
		 it has side-effects.  */
	      if (!side_effects_p (otherop))
		return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }
	  /* Replace:

	      (vec_merge:outer (vec_duplicate:outer x:inner)
			       (subreg:outer y:inner 0)
			       (const_int N))

	     with (vec_concat:outer x:inner y:inner) if N == 1,
	     or (vec_concat:outer y:inner x:inner) if N == 2.

	     Implicitly, this means we have a paradoxical subreg, but such
	     a check is cheap, so make it anyway.

	     Only applies for vectors of two elements.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == SUBREG
	      && GET_MODE (op1) == GET_MODE (op0)
	      && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
	      && paradoxical_subreg_p (op1)
	      && subreg_lowpart_p (op1)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = SUBREG_REG (op1);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }

	  /* Same as above but with switched operands:
	     Replace (vec_merge:outer (subreg:outer x:inner 0)
				      (vec_duplicate:outer y:inner)
				      (const_int N))

	     with (vec_concat:outer x:inner y:inner) if N == 1,
	     or (vec_concat:outer y:inner x:inner) if N == 2.  */
	  if (GET_CODE (op1) == VEC_DUPLICATE
	      && GET_CODE (op0) == SUBREG
	      && GET_MODE (op0) == GET_MODE (op1)
	      && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
	      && paradoxical_subreg_p (op0)
	      && subreg_lowpart_p (op0)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = SUBREG_REG (op0);
	      rtx newop1 = XEXP (op1, 0);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }

	  /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
				(const_int n))
	     with (vec_concat x y) or (vec_concat y x) depending on value
	     of N.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == VEC_DUPLICATE
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = XEXP (op1, 0);
	      if (sel == 2)
		std::swap (newop0, newop1);

	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      if (!side_effects_p (op2))
	{
	  rtx top0 = simplify_merge_mask (op0, op2, 0);
	  rtx top1 = simplify_merge_mask (op1, op2, 1);
	  if (top0 || top1)
	    return simplify_gen_ternary (code, mode, mode,
					 top0 ? top0 : op0,
					 top1 ? top1 : op1, op2);
	}

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  If OP is a CONST_VECTOR,
   FIRST_ELEM is the number of the first element to extract, otherwise
   FIRST_ELEM is ignored.  */
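/* A byte-level illustration (little-endian target, operand invented):
   viewing (const_int 0x12345678) as HImode at byte 0 unpacks the SImode
   value into bytes 78 56 34 12, keeps the first two, and repacks them
   as (const_int 0x5678).  */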
6104 simplify_immed_subreg (fixed_size_mode outermode
, rtx op
,
6105 machine_mode innermode
, unsigned int byte
,
6106 unsigned int first_elem
, unsigned int inner_bytes
)
6110 value_mask
= (1 << value_bit
) - 1
6112 unsigned char value
[MAX_BITSIZE_MODE_ANY_MODE
/ value_bit
];
6120 rtx result_s
= NULL
;
6121 rtvec result_v
= NULL
;
6122 enum mode_class outer_class
;
6123 scalar_mode outer_submode
;
6126 /* Some ports misuse CCmode. */
6127 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
6130 /* We have no way to represent a complex constant at the rtl level. */
6131 if (COMPLEX_MODE_P (outermode
))
6134 /* We support any size mode. */
6135 max_bitsize
= MAX (GET_MODE_BITSIZE (outermode
),
6136 inner_bytes
* BITS_PER_UNIT
);
  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CEIL (inner_bytes, GET_MODE_UNIT_SIZE (innermode));
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = (GET_CODE (op) == CONST_VECTOR
		? CONST_VECTOR_ELT (op, first_elem + elem)
		: op);

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
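
      /* A worked example of the renumbering above (illustrative only):
	 with WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN, UNITS_PER_WORD == 4,
	 num_elem == 2 and elem_bitsize == 32, element 0 gets byte == 0
	 and ibyte == 4, so bytele == 4: the first element in target
	 order lands in the most significant half of the little-endian
	 VALUE array.  */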
      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;
	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
	    unsigned char extend = wi::sign_mask (val);
	    int prec = wi::get_precision (val);

	    for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;
	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      scalar_float_mode el_mode;

	      el_mode = as_a <scalar_float_mode> (GET_MODE (el));
	      int bitsize = GET_MODE_BITSIZE (el_mode);

	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;
	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (inner_bytes >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = inner_bytes - GET_MODE_SIZE (outermode) - byte;
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < inner_bytes);
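
  /* A worked example (illustrative): extracting a QImode value at target
     byte 0 of a 4-byte inner value on a fully big-endian target gives
     ibyte == 4 - 1 - 0 == 3, and with UNITS_PER_WORD == 4 the result is
     byte == 3: the target's byte 0 is the most significant byte, i.e.
     the highest-numbered byte in LSB-first numbering.  */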
  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
		/ HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT
			      / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;
	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT) (*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}

/* Simplify SUBREG:OUTERMODE (OP:INNERMODE, BYTE).
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, poly_uint64 byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);
  poly_uint64 outersize = GET_MODE_SIZE (outermode);
  if (!multiple_p (byte, outersize))
    return NULL_RTX;

  poly_uint64 innersize = GET_MODE_SIZE (innermode);
  if (maybe_ge (byte, innersize))
    return NULL_RTX;

  if (outermode == innermode && known_eq (byte, 0U))
    return op;

  if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
    {
      rtx elt;

      if (VECTOR_MODE_P (outermode)
	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return gen_vec_duplicate (outermode, elt);

      if (outermode == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return elt;
    }
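
  /* For instance (illustrative): (subreg:SI (vec_duplicate:V4SI (reg:SI x)) 8)
     extracts one whole element of the duplicate, so by the rule above it
     simplifies straight to (reg:SI x).  */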
  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || CONST_FIXED_P (op)
      || GET_CODE (op) == CONST_VECTOR)
    {
      /* simplify_immed_subreg deconstructs OP into bytes and constructs
	 the result from bytes, so it only works if the sizes of the modes
	 and the value of the offset are known at compile time.  Cases
	 that apply to general modes and offsets should be handled here
	 before calling simplify_immed_subreg.  */
      fixed_size_mode fs_outermode, fs_innermode;
      unsigned HOST_WIDE_INT cbyte;
      if (is_a <fixed_size_mode> (outermode, &fs_outermode)
	  && is_a <fixed_size_mode> (innermode, &fs_innermode)
	  && byte.is_constant (&cbyte))
	return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte,
				      0, GET_MODE_SIZE (fs_innermode));

      /* Handle constant-sized outer modes and variable-sized inner modes.  */
      unsigned HOST_WIDE_INT first_elem;
      if (GET_CODE (op) == CONST_VECTOR
	  && is_a <fixed_size_mode> (outermode, &fs_outermode)
	  && constant_multiple_p (byte, GET_MODE_UNIT_SIZE (innermode),
				  &first_elem))
	return simplify_immed_subreg (fs_outermode, op, innermode, 0,
				      first_elem,
				      GET_MODE_SIZE (fs_outermode));

      return NULL_RTX;
    }
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
      rtx newx;

      if (outermode == innermostmode
	  && known_eq (byte, 0U)
	  && known_eq (SUBREG_BYTE (op), 0))
	return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
	 to the inner value of OP.  */
      poly_int64 mem_offset = subreg_memory_offset (outermode,
						    innermode, byte);
      poly_int64 op_mem_offset = subreg_memory_offset (op);
      poly_int64 final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
	{
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (maybe_lt (final_offset, 0)
	      || maybe_ge (poly_uint64 (final_offset), innermostsize)
	      || !multiple_p (final_offset, outersize))
	    return NULL_RTX;
	}
      else
	{
	  poly_int64 required_offset = subreg_memory_offset (outermode,
							     innermostmode, 0);
	  if (maybe_ne (final_offset, required_offset))
	    return NULL_RTX;
	  /* Paradoxical subregs always have byte offset 0.  */
	  final_offset = 0;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && known_ge (outersize, innersize)
	      && known_le (outersize, innermostsize)
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
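
  /* For example (illustrative): (subreg:QI (subreg:HI (reg:SI x) 0) 0)
     becomes (subreg:QI (reg:SI x) 0) by the recursion above, folding the
     two mode changes into one.  */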
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
				      subreg_memory_offset (outermode,
							    innermode, byte));

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial register anyway.  */

	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && known_le (outersize, innersize))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      poly_uint64 final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      poly_uint64 part_size = GET_MODE_SIZE (part_mode);
      if (known_lt (byte, part_size))
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else if (known_ge (byte, part_size))
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}
      else
	return NULL_RTX;

      if (maybe_gt (final_offset + outersize, part_size))
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
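
  /* For example (illustrative, assuming SFmode is 4 bytes wide):
     (subreg:SF (concat:SC (reg:SF a) (reg:SF b)) 4) selects the second
     part at offset 0 within it, and so simplifies to (reg:SF b).  */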
  /* Simplify
	(subreg (vec_merge (X)
			   (vector)
			   (const_int ((1 << N) | M)))
		(N * sizeof (outermode)))
     to
	(subreg (X) (N * sizeof (outermode)))
   */
  unsigned int idx;
  if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
      && idx < HOST_BITS_PER_WIDE_INT
      && GET_CODE (op) == VEC_MERGE
      && GET_MODE_INNER (innermode) == outermode
      && CONST_INT_P (XEXP (op, 2))
      && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
    return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
	return CONST0_RTX (outermode);
    }
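
  /* For example (illustrative, little-endian): in
     (subreg:SI (zero_extend:DI (reg:HI x)) 4) the subreg reads bits
     [32, 64), which lie entirely above the 16 source bits, so the whole
     expression folds to (const_int 0).  */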
  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
    {
      /* Handle polynomial integers.  The upper bits of a paradoxical
	 subreg are undefined, so this is safe regardless of whether
	 we're truncating or extending.  */
      if (CONST_POLY_INT_P (op))
	{
	  poly_wide_int val
	    = poly_wide_int::from (const_poly_int_value (op),
				   GET_MODE_PRECISION (int_outermode),
				   SIGNED);
	  return immed_wide_int_const (val, int_outermode);
	}

      if (GET_MODE_PRECISION (int_outermode)
	  < GET_MODE_PRECISION (int_innermode))
	{
	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
	  if (tem)
	    return tem;
	}
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, poly_uint64 byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
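
/* For example (an illustrative sketch): on a little-endian target,
   simplify_gen_subreg (QImode, gen_int_mode (0x1234, HImode), HImode, 0)
   folds to (const_int 0x34), while a non-constant operand such as a
   pseudo register instead yields (subreg:QI (reg:HI ...) 0).  */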
/* Generate a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
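
/* For example (illustrative): lowpart_subreg (QImode, x, SImode) requests
   byte offset 0 on a little-endian target and byte offset 3 on a
   big-endian one, so both forms name the least significant byte of X.  */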
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}

#if CHECKING_P

namespace selftest {

/* Make a unique pseudo REG of mode MODE for use by selftests.  */

static rtx
make_test_reg (machine_mode mode)
{
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}
/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  poly_uint64 nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NOT, mode,
					       duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NEG, mode,
					       duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (PLUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (MINUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
			 simplify_binary_operation (MINUS, mode, duplicate,
						    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
		     simplify_binary_operation (VEC_SELECT, inner_mode,
						duplicate, zero_par));

  unsigned HOST_WIDE_INT const_nunits;
  if (nunits.is_constant (&const_nunits))
    {
      /* And again with the final element.  */
      rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
      rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
      ASSERT_RTX_PTR_EQ (scalar_reg,
			 simplify_binary_operation (VEC_SELECT, inner_mode,
						    duplicate, last_par));

      /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE.  */
      rtx vector_reg = make_test_reg (mode);
      for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
	{
	  if (i >= HOST_BITS_PER_WIDE_INT)
	    break;
	  rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
	  rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
	  poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
	  ASSERT_RTX_EQ (scalar_reg,
			 simplify_gen_subreg (inner_mode, vm,
					      mode, offset));
	}
    }

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
		 simplify_gen_subreg (inner_mode, duplicate,
				      mode, offset));

  machine_mode narrower_mode;
  if (maybe_ne (nunits, 2U)
      && multiple_p (nunits, 2)
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_binary_operation (VEC_SELECT, narrower_mode,
						duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_gen_subreg (narrower_mode, duplicate,
					  mode, offset));
    }
}
/* Test vector simplifications involving VEC_SERIES in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_series (machine_mode mode, rtx scalar_reg)
{
  /* Test unary cases with VEC_SERIES arguments.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
  rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
  rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
  rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
  rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
  rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
  rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
					 neg_scalar_reg);
  ASSERT_RTX_EQ (series_0_r,
		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
  ASSERT_RTX_EQ (series_r_r,
		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));

  /* Test that a VEC_SERIES with a zero step is simplified away.  */
  ASSERT_RTX_EQ (duplicate,
		 simplify_binary_operation (VEC_SERIES, mode,
					    scalar_reg, const0_rtx));

  /* Test PLUS and MINUS with VEC_SERIES.  */
  rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
  rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
  rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
  ASSERT_RTX_EQ (series_r_r,
		 simplify_binary_operation (PLUS, mode, series_0_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_r,
		 simplify_binary_operation (MINUS, mode, series_r_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
					    constm1_rtx));
}
/* Verify simplify_merge_mask works correctly.  */

static void
test_vec_merge (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx op2 = make_test_reg (mode);
  rtx op3 = make_test_reg (mode);
  rtx op4 = make_test_reg (mode);
  rtx op5 = make_test_reg (mode);
  rtx mask1 = make_test_reg (SImode);
  rtx mask2 = make_test_reg (SImode);
  rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
  rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
  rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);

  /* Simple vec_merge.  */
  ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
  ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));

  /* Nested vec_merge.
     It's tempting to make this simplify right down to opN, but we don't
     because all the simplify_* functions assume that the operands have
     already been simplified.  */
  rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
  ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
  ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));

  /* Intermediate unary op.  */
  rtx unop = gen_rtx_NOT (mode, vm1);
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
		 simplify_merge_mask (unop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
		 simplify_merge_mask (unop, mask1, 1));

  /* Intermediate binary op.  */
  rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
		 simplify_merge_mask (binop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
		 simplify_merge_mask (binop, mask1, 1));

  /* Intermediate ternary op.  */
  rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
		 simplify_merge_mask (tenop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
		 simplify_merge_mask (tenop, mask1, 1));

  /* Side effects.  */
  rtx badop0 = gen_rtx_PRE_INC (mode, op0);
  rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
  ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));

  /* Called indirectly.  */
  ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
		 simplify_rtx (nvm));
}
/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
	{
	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
	  test_vector_ops_duplicate (mode, scalar_reg);
	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
	    test_vector_ops_series (mode, scalar_reg);
	  test_vec_merge (mode);
	}
    }
}
template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};

/* Test various CONST_POLY_INT properties.  */

template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller arithmetic
     testing is done directly on poly_ints.  */
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}

/* Run all of the selftests within this file.  */

void
simplify_rtx_c_tests ()
{
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}
} // namespace selftest

#endif /* CHECKING_P */