/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "selftest-rtl.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
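
/* Illustrative sketch (not part of the original sources): given the low
   half of a (low, high) pair, the macro produces the high half that a
   signed interpretation of LOW would imply:

     unsigned HOST_WIDE_INT low = ...;
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);

   HIGH becomes HOST_WIDE_INT_M1 (all ones) when the top bit of LOW is
   set, and HOST_WIDE_INT_0 otherwise.  */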
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (!HWI_COMPUTABLE_MODE_P (mode)
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
					   mode);
  return gen_int_mode (val, mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
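
/* Illustrative example (not part of the original sources): on a host with
   a 64-bit HOST_WIDE_INT,

     mode_signbit_p (SImode, gen_int_mode (HOST_WIDE_INT_1U << 31, SImode))

   is true, because after masking with the SImode mode mask the value is
   exactly the SImode sign bit, while mode_signbit_p (SImode, const1_rtx)
   is false.  */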
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
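
/* Illustrative sketch (not part of the original sources): because PLUS is
   commutative, a call such as

     rtx x = simplify_gen_binary (PLUS, SImode, const1_rtx, reg);

   (where REG is some SImode register rtx, hypothetical here) first tries to
   fold the addition and, failing that, canonicalizes the operand order so
   the constant comes second, yielding (plus:SI reg (const_int 1)).  */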
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
	return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
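
/* Illustrative sketch (not part of the original sources): if X is a MEM
   such as (mem/u/c:SF (symbol_ref ...)) whose symbol addresses a
   constant-pool entry holding 1.0, then

     rtx val = avoid_constant_pool_reference (x);

   returns the CONST_DOUBLE for 1.0 rather than the MEM, letting later
   folding see the constant directly.  Anything else is returned
   unchanged.  */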
271 /* Simplify a MEM based on its attributes. This is the default
272 delegitimize_address target hook, and it's recommended that every
273 overrider call it. */
276 delegitimize_mem_from_attrs (rtx x
)
278 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
279 use their base addresses as equivalent. */
282 && MEM_OFFSET_KNOWN_P (x
))
284 tree decl
= MEM_EXPR (x
);
285 machine_mode mode
= GET_MODE (x
);
286 poly_int64 offset
= 0;
288 switch (TREE_CODE (decl
))
298 case ARRAY_RANGE_REF
:
303 case VIEW_CONVERT_EXPR
:
305 poly_int64 bitsize
, bitpos
, bytepos
, toffset_val
= 0;
307 int unsignedp
, reversep
, volatilep
= 0;
310 = get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
, &mode
,
311 &unsignedp
, &reversep
, &volatilep
);
312 if (maybe_ne (bitsize
, GET_MODE_BITSIZE (mode
))
313 || !multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
314 || (toffset
&& !poly_int_tree_p (toffset
, &toffset_val
)))
317 offset
+= bytepos
+ toffset_val
;
323 && mode
== GET_MODE (x
)
325 && (TREE_STATIC (decl
)
326 || DECL_THREAD_LOCAL_P (decl
))
327 && DECL_RTL_SET_P (decl
)
328 && MEM_P (DECL_RTL (decl
)))
332 offset
+= MEM_OFFSET (x
);
334 newx
= DECL_RTL (decl
);
338 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
339 poly_int64 n_offset
, o_offset
;
341 /* Avoid creating a new MEM needlessly if we already had
342 the same address. We do if there's no OFFSET and the
343 old address X is identical to NEWX, or if X is of the
344 form (plus NEWX OFFSET), or the NEWX is of the form
345 (plus Y (const_int Z)) and X is that with the offset
346 added: (plus Y (const_int Z+OFFSET)). */
347 n
= strip_offset (n
, &n_offset
);
348 o
= strip_offset (o
, &o_offset
);
349 if (!(known_eq (o_offset
, n_offset
+ offset
)
350 && rtx_equal_p (o
, n
)))
351 x
= adjust_address_nv (newx
, mode
, offset
);
353 else if (GET_MODE (x
) == GET_MODE (newx
)
354 && known_eq (offset
, 0))
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
					 op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
					    op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
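
/* Illustrative sketch (not part of the original sources): comparing a
   register with itself can fold to a constant when that is valid, so

     simplify_gen_relational (EQ, SImode, DImode, reg, reg)

   can return a constant-true result (e.g. const_true_rtx on typical
   targets) instead of building (eq:SI reg reg), where REG is some DImode
   register rtx, hypothetical here.  */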
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */
416 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
417 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
419 enum rtx_code code
= GET_CODE (x
);
420 machine_mode mode
= GET_MODE (x
);
421 machine_mode op_mode
;
423 rtx op0
, op1
, op2
, newx
, op
;
427 if (__builtin_expect (fn
!= NULL
, 0))
429 newx
= fn (x
, old_rtx
, data
);
433 else if (rtx_equal_p (x
, old_rtx
))
434 return copy_rtx ((rtx
) data
);
436 switch (GET_RTX_CLASS (code
))
440 op_mode
= GET_MODE (op0
);
441 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
442 if (op0
== XEXP (x
, 0))
444 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
448 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
449 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
450 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
452 return simplify_gen_binary (code
, mode
, op0
, op1
);
455 case RTX_COMM_COMPARE
:
458 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
459 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
460 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
461 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
463 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
466 case RTX_BITFIELD_OPS
:
468 op_mode
= GET_MODE (op0
);
469 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
470 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
471 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
472 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
474 if (op_mode
== VOIDmode
)
475 op_mode
= GET_MODE (op0
);
476 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
481 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
482 if (op0
== SUBREG_REG (x
))
484 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
485 GET_MODE (SUBREG_REG (x
)),
487 return op0
? op0
: x
;
494 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
495 if (op0
== XEXP (x
, 0))
497 return replace_equiv_address_nv (x
, op0
);
499 else if (code
== LO_SUM
)
501 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
502 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
504 /* (lo_sum (high x) y) -> y where x and y have the same base. */
505 if (GET_CODE (op0
) == HIGH
)
507 rtx base0
, base1
, offset0
, offset1
;
508 split_const (XEXP (op0
, 0), &base0
, &offset0
);
509 split_const (op1
, &base1
, &offset1
);
510 if (rtx_equal_p (base0
, base1
))
514 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
516 return gen_rtx_LO_SUM (mode
, op0
, op1
);
525 fmt
= GET_RTX_FORMAT (code
);
526 for (i
= 0; fmt
[i
]; i
++)
531 newvec
= XVEC (newx
, i
);
532 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
534 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
536 if (op
!= RTVEC_ELT (vec
, j
))
540 newvec
= shallow_copy_rtvec (vec
);
542 newx
= shallow_copy_rtx (x
);
543 XVEC (newx
, i
) = newvec
;
545 RTVEC_ELT (newvec
, j
) = op
;
553 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
554 if (op
!= XEXP (x
, i
))
557 newx
= shallow_copy_rtx (x
);
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
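
/* Illustrative sketch (not part of the original sources): given an rtx X of
   the form (plus:SI (reg:SI R) (const_int 4)),

     rtx y = simplify_replace_rtx (x, gen_rtx_REG (SImode, R), const0_rtx);

   substitutes the register with zero and re-simplifies the addition,
   yielding (const_int 4).  The register number R is hypothetical.  */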
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (const_int Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
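
/* Illustrative sketch (not part of the original sources): one concrete case
   handled below is distributing a truncation over arithmetic when the
   target can operate in the narrower mode, e.g.

     (truncate:QI (plus:SI (reg:SI X) (reg:SI Y)))
       --> (plus:QI (truncate:QI (reg:SI X)) (truncate:QI (reg:SI Y)))

   which is built with simplify_gen_unary (TRUNCATE, ...) on each operand
   followed by simplify_gen_binary (PLUS, ...).  */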
static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);
632 /* Optimize truncations of zero and sign extended values. */
633 if (GET_CODE (op
) == ZERO_EXTEND
634 || GET_CODE (op
) == SIGN_EXTEND
)
636 /* There are three possibilities. If MODE is the same as the
637 origmode, we can omit both the extension and the subreg.
638 If MODE is not larger than the origmode, we can apply the
639 truncation without the extension. Finally, if the outermode
640 is larger than the origmode, we can just extend to the appropriate
642 machine_mode origmode
= GET_MODE (XEXP (op
, 0));
643 if (mode
== origmode
)
645 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
646 return simplify_gen_unary (TRUNCATE
, mode
,
647 XEXP (op
, 0), origmode
);
649 return simplify_gen_unary (GET_CODE (op
), mode
,
650 XEXP (op
, 0), origmode
);
653 /* If the machine can perform operations in the truncated mode, distribute
654 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
655 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
657 && (!WORD_REGISTER_OPERATIONS
|| precision
>= BITS_PER_WORD
)
658 && (GET_CODE (op
) == PLUS
659 || GET_CODE (op
) == MINUS
660 || GET_CODE (op
) == MULT
))
662 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
665 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
667 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
671 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
672 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
673 the outer subreg is effectively a truncation to the original mode. */
674 if ((GET_CODE (op
) == LSHIFTRT
675 || GET_CODE (op
) == ASHIFTRT
)
676 /* Ensure that OP_MODE is at least twice as wide as MODE
677 to avoid the possibility that an outer LSHIFTRT shifts by more
678 than the sign extension's sign_bit_copies and introduces zeros
679 into the high bits of the result. */
680 && 2 * precision
<= op_precision
681 && CONST_INT_P (XEXP (op
, 1))
682 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
683 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
684 && UINTVAL (XEXP (op
, 1)) < precision
)
685 return simplify_gen_binary (ASHIFTRT
, mode
,
686 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
688 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
689 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
690 the outer subreg is effectively a truncation to the original mode. */
691 if ((GET_CODE (op
) == LSHIFTRT
692 || GET_CODE (op
) == ASHIFTRT
)
693 && CONST_INT_P (XEXP (op
, 1))
694 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
695 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
696 && UINTVAL (XEXP (op
, 1)) < precision
)
697 return simplify_gen_binary (LSHIFTRT
, mode
,
698 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
700 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
701 to (ashift:QI (x:QI) C), where C is a suitable small constant and
702 the outer subreg is effectively a truncation to the original mode. */
703 if (GET_CODE (op
) == ASHIFT
704 && CONST_INT_P (XEXP (op
, 1))
705 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
706 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
707 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
708 && UINTVAL (XEXP (op
, 1)) < precision
)
709 return simplify_gen_binary (ASHIFT
, mode
,
710 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
712 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
713 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
715 if (GET_CODE (op
) == AND
716 && (GET_CODE (XEXP (op
, 0)) == LSHIFTRT
717 || GET_CODE (XEXP (op
, 0)) == ASHIFTRT
)
718 && CONST_INT_P (XEXP (XEXP (op
, 0), 1))
719 && CONST_INT_P (XEXP (op
, 1)))
721 rtx op0
= (XEXP (XEXP (op
, 0), 0));
722 rtx shift_op
= XEXP (XEXP (op
, 0), 1);
723 rtx mask_op
= XEXP (op
, 1);
724 unsigned HOST_WIDE_INT shift
= UINTVAL (shift_op
);
725 unsigned HOST_WIDE_INT mask
= UINTVAL (mask_op
);
727 if (shift
< precision
728 /* If doing this transform works for an X with all bits set,
729 it works for any X. */
730 && ((GET_MODE_MASK (mode
) >> shift
) & mask
)
731 == ((GET_MODE_MASK (op_mode
) >> shift
) & mask
)
732 && (op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, op_mode
))
733 && (op0
= simplify_gen_binary (LSHIFTRT
, mode
, op0
, shift_op
)))
735 mask_op
= GEN_INT (trunc_int_for_mode (mask
, mode
));
736 return simplify_gen_binary (AND
, mode
, op0
, mask_op
);
740 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
741 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
743 if ((GET_CODE (op
) == ZERO_EXTRACT
|| GET_CODE (op
) == SIGN_EXTRACT
)
744 && REG_P (XEXP (op
, 0))
745 && GET_MODE (XEXP (op
, 0)) == GET_MODE (op
)
746 && CONST_INT_P (XEXP (op
, 1))
747 && CONST_INT_P (XEXP (op
, 2)))
749 rtx op0
= XEXP (op
, 0);
750 unsigned HOST_WIDE_INT len
= UINTVAL (XEXP (op
, 1));
751 unsigned HOST_WIDE_INT pos
= UINTVAL (XEXP (op
, 2));
752 if (BITS_BIG_ENDIAN
&& pos
>= op_precision
- precision
)
754 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
757 pos
-= op_precision
- precision
;
758 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
759 XEXP (op
, 1), GEN_INT (pos
));
762 else if (!BITS_BIG_ENDIAN
&& precision
>= len
+ pos
)
764 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
766 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
767 XEXP (op
, 1), XEXP (op
, 2));
771 /* Recognize a word extraction from a multi-word subreg. */
772 if ((GET_CODE (op
) == LSHIFTRT
773 || GET_CODE (op
) == ASHIFTRT
)
774 && SCALAR_INT_MODE_P (mode
)
775 && SCALAR_INT_MODE_P (op_mode
)
776 && precision
>= BITS_PER_WORD
777 && 2 * precision
<= op_precision
778 && CONST_INT_P (XEXP (op
, 1))
779 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
780 && UINTVAL (XEXP (op
, 1)) < op_precision
)
782 poly_int64 byte
= subreg_lowpart_offset (mode
, op_mode
);
783 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
784 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
786 ? byte
- shifted_bytes
787 : byte
+ shifted_bytes
));
790 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
791 and try replacing the TRUNCATE and shift with it. Don't do this
792 if the MEM has a mode-dependent address. */
793 if ((GET_CODE (op
) == LSHIFTRT
794 || GET_CODE (op
) == ASHIFTRT
)
795 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
796 && is_a
<scalar_int_mode
> (op_mode
, &int_op_mode
)
797 && MEM_P (XEXP (op
, 0))
798 && CONST_INT_P (XEXP (op
, 1))
799 && INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (int_mode
) == 0
800 && INTVAL (XEXP (op
, 1)) > 0
801 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (int_op_mode
)
802 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
803 MEM_ADDR_SPACE (XEXP (op
, 0)))
804 && ! MEM_VOLATILE_P (XEXP (op
, 0))
805 && (GET_MODE_SIZE (int_mode
) >= UNITS_PER_WORD
806 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
808 poly_int64 byte
= subreg_lowpart_offset (int_mode
, int_op_mode
);
809 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
810 return adjust_address_nv (XEXP (op
, 0), int_mode
,
812 ? byte
- shifted_bytes
813 : byte
+ shifted_bytes
));
816 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
817 (OP:SI foo:SI) if OP is NEG or ABS. */
818 if ((GET_CODE (op
) == ABS
819 || GET_CODE (op
) == NEG
)
820 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
821 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
822 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
823 return simplify_gen_unary (GET_CODE (op
), mode
,
824 XEXP (XEXP (op
, 0), 0), mode
);
826 /* (truncate:A (subreg:B (truncate:C X) 0)) is
828 if (GET_CODE (op
) == SUBREG
829 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
830 && SCALAR_INT_MODE_P (op_mode
)
831 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &subreg_mode
)
832 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
833 && subreg_lowpart_p (op
))
835 rtx inner
= XEXP (SUBREG_REG (op
), 0);
836 if (GET_MODE_PRECISION (int_mode
) <= GET_MODE_PRECISION (subreg_mode
))
837 return simplify_gen_unary (TRUNCATE
, int_mode
, inner
,
840 /* If subreg above is paradoxical and C is narrower
841 than A, return (subreg:A (truncate:C X) 0). */
842 return simplify_gen_subreg (int_mode
, SUBREG_REG (op
), subreg_mode
, 0);
845 /* (truncate:A (truncate:B X)) is (truncate:A X). */
846 if (GET_CODE (op
) == TRUNCATE
)
847 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
848 GET_MODE (XEXP (op
, 0)));
850 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
852 if (GET_CODE (op
) == IOR
853 && SCALAR_INT_MODE_P (mode
)
854 && SCALAR_INT_MODE_P (op_mode
)
855 && CONST_INT_P (XEXP (op
, 1))
856 && trunc_int_for_mode (INTVAL (XEXP (op
, 1)), mode
) == -1)
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
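
/* Illustrative example (not part of the original sources): SFmode has a
   24-bit significand, so (float:SF (reg:SI X)) is exact only when X is
   known to need at most 24 significant bits, e.g. when X is the result of
   (and:SI (reg:SI ...) (const_int 0xffff)); an arbitrary 32-bit value may
   have to be rounded.  */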
906 /* Perform some simplifications we can do even if the operands
909 simplify_unary_operation_1 (enum rtx_code code
, machine_mode mode
, rtx op
)
911 enum rtx_code reversed
;
912 rtx temp
, elt
, base
, step
;
913 scalar_int_mode inner
, int_mode
, op_mode
, op0_mode
;
918 /* (not (not X)) == X. */
919 if (GET_CODE (op
) == NOT
)
922 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
923 comparison is all ones. */
924 if (COMPARISON_P (op
)
925 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
926 && ((reversed
= reversed_comparison_code (op
, NULL
)) != UNKNOWN
))
927 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
928 XEXP (op
, 0), XEXP (op
, 1));
930 /* (not (plus X -1)) can become (neg X). */
931 if (GET_CODE (op
) == PLUS
932 && XEXP (op
, 1) == constm1_rtx
)
933 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
935 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
936 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
937 and MODE_VECTOR_INT. */
938 if (GET_CODE (op
) == NEG
&& CONSTM1_RTX (mode
))
939 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
942 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
943 if (GET_CODE (op
) == XOR
944 && CONST_INT_P (XEXP (op
, 1))
945 && (temp
= simplify_unary_operation (NOT
, mode
,
946 XEXP (op
, 1), mode
)) != 0)
947 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
949 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
950 if (GET_CODE (op
) == PLUS
951 && CONST_INT_P (XEXP (op
, 1))
952 && mode_signbit_p (mode
, XEXP (op
, 1))
953 && (temp
= simplify_unary_operation (NOT
, mode
,
954 XEXP (op
, 1), mode
)) != 0)
955 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
958 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
959 operands other than 1, but that is not valid. We could do a
960 similar simplification for (not (lshiftrt C X)) where C is
961 just the sign bit, but this doesn't seem common enough to
963 if (GET_CODE (op
) == ASHIFT
964 && XEXP (op
, 0) == const1_rtx
)
966 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
967 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
970 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
971 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
972 so we can perform the above simplification. */
973 if (STORE_FLAG_VALUE
== -1
974 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
975 && GET_CODE (op
) == ASHIFTRT
976 && CONST_INT_P (XEXP (op
, 1))
977 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
978 return simplify_gen_relational (GE
, int_mode
, VOIDmode
,
979 XEXP (op
, 0), const0_rtx
);
982 if (partial_subreg_p (op
)
983 && subreg_lowpart_p (op
)
984 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
985 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
987 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
990 x
= gen_rtx_ROTATE (inner_mode
,
991 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
993 XEXP (SUBREG_REG (op
), 1));
994 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
999 /* Apply De Morgan's laws to reduce number of patterns for machines
1000 with negating logical insns (and-not, nand, etc.). If result has
1001 only one NOT, put it first, since that is how the patterns are
1003 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
1005 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
1006 machine_mode op_mode
;
1008 op_mode
= GET_MODE (in1
);
1009 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
1011 op_mode
= GET_MODE (in2
);
1012 if (op_mode
== VOIDmode
)
1014 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
1016 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
1017 std::swap (in1
, in2
);
1019 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
1023 /* (not (bswap x)) -> (bswap (not x)). */
1024 if (GET_CODE (op
) == BSWAP
)
1026 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1027 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
1032 /* (neg (neg X)) == X. */
1033 if (GET_CODE (op
) == NEG
)
1034 return XEXP (op
, 0);
1036 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1037 If comparison is not reversible use
1039 if (GET_CODE (op
) == IF_THEN_ELSE
)
1041 rtx cond
= XEXP (op
, 0);
1042 rtx true_rtx
= XEXP (op
, 1);
1043 rtx false_rtx
= XEXP (op
, 2);
1045 if ((GET_CODE (true_rtx
) == NEG
1046 && rtx_equal_p (XEXP (true_rtx
, 0), false_rtx
))
1047 || (GET_CODE (false_rtx
) == NEG
1048 && rtx_equal_p (XEXP (false_rtx
, 0), true_rtx
)))
1050 if (reversed_comparison_code (cond
, NULL
) != UNKNOWN
)
1051 temp
= reversed_comparison (cond
, mode
);
1055 std::swap (true_rtx
, false_rtx
);
1057 return simplify_gen_ternary (IF_THEN_ELSE
, mode
,
1058 mode
, temp
, true_rtx
, false_rtx
);
1062 /* (neg (plus X 1)) can become (not X). */
1063 if (GET_CODE (op
) == PLUS
1064 && XEXP (op
, 1) == const1_rtx
)
1065 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1067 /* Similarly, (neg (not X)) is (plus X 1). */
1068 if (GET_CODE (op
) == NOT
)
1069 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
1072 /* (neg (minus X Y)) can become (minus Y X). This transformation
1073 isn't safe for modes with signed zeros, since if X and Y are
1074 both +0, (minus Y X) is the same as (minus X Y). If the
1075 rounding mode is towards +infinity (or -infinity) then the two
1076 expressions will be rounded differently. */
1077 if (GET_CODE (op
) == MINUS
1078 && !HONOR_SIGNED_ZEROS (mode
)
1079 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1080 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
1082 if (GET_CODE (op
) == PLUS
1083 && !HONOR_SIGNED_ZEROS (mode
)
1084 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1086 /* (neg (plus A C)) is simplified to (minus -C A). */
1087 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
1088 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
1090 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
1092 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
1095 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1096 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1097 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1100 /* (neg (mult A B)) becomes (mult A (neg B)).
1101 This works even for floating-point values. */
1102 if (GET_CODE (op
) == MULT
1103 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1105 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1106 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1109 /* NEG commutes with ASHIFT since it is multiplication. Only do
1110 this if we can then eliminate the NEG (e.g., if the operand
1112 if (GET_CODE (op
) == ASHIFT
)
1114 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1116 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1119 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1120 C is equal to the width of MODE minus 1. */
1121 if (GET_CODE (op
) == ASHIFTRT
1122 && CONST_INT_P (XEXP (op
, 1))
1123 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1124 return simplify_gen_binary (LSHIFTRT
, mode
,
1125 XEXP (op
, 0), XEXP (op
, 1));
1127 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1128 C is equal to the width of MODE minus 1. */
1129 if (GET_CODE (op
) == LSHIFTRT
1130 && CONST_INT_P (XEXP (op
, 1))
1131 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1132 return simplify_gen_binary (ASHIFTRT
, mode
,
1133 XEXP (op
, 0), XEXP (op
, 1));
1135 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1136 if (GET_CODE (op
) == XOR
1137 && XEXP (op
, 1) == const1_rtx
1138 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1139 return plus_constant (mode
, XEXP (op
, 0), -1);
1141 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1142 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1143 if (GET_CODE (op
) == LT
1144 && XEXP (op
, 1) == const0_rtx
1145 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op
, 0)), &inner
))
1147 int_mode
= as_a
<scalar_int_mode
> (mode
);
1148 int isize
= GET_MODE_PRECISION (inner
);
1149 if (STORE_FLAG_VALUE
== 1)
1151 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1152 gen_int_shift_amount (inner
,
1154 if (int_mode
== inner
)
1156 if (GET_MODE_PRECISION (int_mode
) > isize
)
1157 return simplify_gen_unary (SIGN_EXTEND
, int_mode
, temp
, inner
);
1158 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1160 else if (STORE_FLAG_VALUE
== -1)
1162 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1163 gen_int_shift_amount (inner
,
1165 if (int_mode
== inner
)
1167 if (GET_MODE_PRECISION (int_mode
) > isize
)
1168 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, temp
, inner
);
1169 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1173 if (vec_series_p (op
, &base
, &step
))
1175 /* Only create a new series if we can simplify both parts. In other
1176 cases this isn't really a simplification, and it's not necessarily
1177 a win to replace a vector operation with a scalar operation. */
1178 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
1179 base
= simplify_unary_operation (NEG
, inner_mode
, base
, inner_mode
);
1182 step
= simplify_unary_operation (NEG
, inner_mode
,
1185 return gen_vec_series (mode
, base
, step
);
1191 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1192 with the umulXi3_highpart patterns. */
1193 if (GET_CODE (op
) == LSHIFTRT
1194 && GET_CODE (XEXP (op
, 0)) == MULT
)
1197 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1199 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1201 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1205 /* We can't handle truncation to a partial integer mode here
1206 because we don't know the real bitsize of the partial
1211 if (GET_MODE (op
) != VOIDmode
)
1213 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1218 /* If we know that the value is already truncated, we can
1219 replace the TRUNCATE with a SUBREG. */
1220 if (known_eq (GET_MODE_NUNITS (mode
), 1)
1221 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1222 || truncated_to_mode (mode
, op
)))
1224 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1229 /* A truncate of a comparison can be replaced with a subreg if
1230 STORE_FLAG_VALUE permits. This is like the previous test,
1231 but it works even if the comparison is done in a mode larger
1232 than HOST_BITS_PER_WIDE_INT. */
1233 if (HWI_COMPUTABLE_MODE_P (mode
)
1234 && COMPARISON_P (op
)
1235 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1237 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1242 /* A truncate of a memory is just loading the low part of the memory
1243 if we are not changing the meaning of the address. */
1244 if (GET_CODE (op
) == MEM
1245 && !VECTOR_MODE_P (mode
)
1246 && !MEM_VOLATILE_P (op
)
1247 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1249 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1256 case FLOAT_TRUNCATE
:
1257 if (DECIMAL_FLOAT_MODE_P (mode
))
1260 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1261 if (GET_CODE (op
) == FLOAT_EXTEND
1262 && GET_MODE (XEXP (op
, 0)) == mode
)
1263 return XEXP (op
, 0);
1265 /* (float_truncate:SF (float_truncate:DF foo:XF))
1266 = (float_truncate:SF foo:XF).
1267 This may eliminate double rounding, so it is unsafe.
1269 (float_truncate:SF (float_extend:XF foo:DF))
1270 = (float_truncate:SF foo:DF).
1272 (float_truncate:DF (float_extend:XF foo:SF))
1273 = (float_extend:DF foo:SF). */
1274 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1275 && flag_unsafe_math_optimizations
)
1276 || GET_CODE (op
) == FLOAT_EXTEND
)
1277 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)))
1278 > GET_MODE_UNIT_SIZE (mode
)
1279 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1281 XEXP (op
, 0), mode
);
1283 /* (float_truncate (float x)) is (float x) */
1284 if ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1285 && (flag_unsafe_math_optimizations
1286 || exact_int_to_float_conversion_p (op
)))
1287 return simplify_gen_unary (GET_CODE (op
), mode
,
1289 GET_MODE (XEXP (op
, 0)));
1291 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1292 (OP:SF foo:SF) if OP is NEG or ABS. */
1293 if ((GET_CODE (op
) == ABS
1294 || GET_CODE (op
) == NEG
)
1295 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1296 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1297 return simplify_gen_unary (GET_CODE (op
), mode
,
1298 XEXP (XEXP (op
, 0), 0), mode
);
1300 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1301 is (float_truncate:SF x). */
1302 if (GET_CODE (op
) == SUBREG
1303 && subreg_lowpart_p (op
)
1304 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1305 return SUBREG_REG (op
);
1309 if (DECIMAL_FLOAT_MODE_P (mode
))
1312 /* (float_extend (float_extend x)) is (float_extend x)
1314 (float_extend (float x)) is (float x) assuming that double
1315 rounding can't happen.
1317 if (GET_CODE (op
) == FLOAT_EXTEND
1318 || ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1319 && exact_int_to_float_conversion_p (op
)))
1320 return simplify_gen_unary (GET_CODE (op
), mode
,
1322 GET_MODE (XEXP (op
, 0)));
1327 /* (abs (neg <foo>)) -> (abs <foo>) */
1328 if (GET_CODE (op
) == NEG
)
1329 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1330 GET_MODE (XEXP (op
, 0)));
1332 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1334 if (GET_MODE (op
) == VOIDmode
)
1337 /* If operand is something known to be positive, ignore the ABS. */
1338 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1339 || val_signbit_known_clear_p (GET_MODE (op
),
1340 nonzero_bits (op
, GET_MODE (op
))))
1343 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1344 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
1345 && (num_sign_bit_copies (op
, int_mode
)
1346 == GET_MODE_PRECISION (int_mode
)))
1347 return gen_rtx_NEG (int_mode
, op
);
1352 /* (ffs (*_extend <X>)) = (ffs <X>) */
1353 if (GET_CODE (op
) == SIGN_EXTEND
1354 || GET_CODE (op
) == ZERO_EXTEND
)
1355 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1356 GET_MODE (XEXP (op
, 0)));
1360 switch (GET_CODE (op
))
1364 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1365 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1366 GET_MODE (XEXP (op
, 0)));
1370 /* Rotations don't affect popcount. */
1371 if (!side_effects_p (XEXP (op
, 1)))
1372 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1373 GET_MODE (XEXP (op
, 0)));
1382 switch (GET_CODE (op
))
1388 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1389 GET_MODE (XEXP (op
, 0)));
1393 /* Rotations don't affect parity. */
1394 if (!side_effects_p (XEXP (op
, 1)))
1395 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1396 GET_MODE (XEXP (op
, 0)));
1405 /* (bswap (bswap x)) -> x. */
1406 if (GET_CODE (op
) == BSWAP
)
1407 return XEXP (op
, 0);
1411 /* (float (sign_extend <X>)) = (float <X>). */
1412 if (GET_CODE (op
) == SIGN_EXTEND
)
1413 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1414 GET_MODE (XEXP (op
, 0)));
1418 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1419 becomes just the MINUS if its mode is MODE. This allows
1420 folding switch statements on machines using casesi (such as
1422 if (GET_CODE (op
) == TRUNCATE
1423 && GET_MODE (XEXP (op
, 0)) == mode
1424 && GET_CODE (XEXP (op
, 0)) == MINUS
1425 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1426 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1427 return XEXP (op
, 0);
1429 /* Extending a widening multiplication should be canonicalized to
1430 a wider widening multiplication. */
1431 if (GET_CODE (op
) == MULT
)
1433 rtx lhs
= XEXP (op
, 0);
1434 rtx rhs
= XEXP (op
, 1);
1435 enum rtx_code lcode
= GET_CODE (lhs
);
1436 enum rtx_code rcode
= GET_CODE (rhs
);
1438 /* Widening multiplies usually extend both operands, but sometimes
1439 they use a shift to extract a portion of a register. */
1440 if ((lcode
== SIGN_EXTEND
1441 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1442 && (rcode
== SIGN_EXTEND
1443 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1445 machine_mode lmode
= GET_MODE (lhs
);
1446 machine_mode rmode
= GET_MODE (rhs
);
1449 if (lcode
== ASHIFTRT
)
1450 /* Number of bits not shifted off the end. */
1451 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1452 - INTVAL (XEXP (lhs
, 1)));
1453 else /* lcode == SIGN_EXTEND */
1454 /* Size of inner mode. */
1455 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1457 if (rcode
== ASHIFTRT
)
1458 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1459 - INTVAL (XEXP (rhs
, 1)));
1460 else /* rcode == SIGN_EXTEND */
1461 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1463 /* We can only widen multiplies if the result is mathematiclly
1464 equivalent. I.e. if overflow was impossible. */
1465 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1466 return simplify_gen_binary
1468 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1469 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1473 /* Check for a sign extension of a subreg of a promoted
1474 variable, where the promotion is sign-extended, and the
1475 target mode is the same as the variable's promotion. */
1476 if (GET_CODE (op
) == SUBREG
1477 && SUBREG_PROMOTED_VAR_P (op
)
1478 && SUBREG_PROMOTED_SIGNED_P (op
)
1479 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1481 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, SUBREG_REG (op
));
1486 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1487 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1488 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1490 gcc_assert (GET_MODE_UNIT_PRECISION (mode
)
1491 > GET_MODE_UNIT_PRECISION (GET_MODE (op
)));
1492 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1493 GET_MODE (XEXP (op
, 0)));
1496 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1497 is (sign_extend:M (subreg:O <X>)) if there is mode with
1498 GET_MODE_BITSIZE (N) - I bits.
1499 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1500 is similarly (zero_extend:M (subreg:O <X>)). */
1501 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1502 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1503 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1504 && CONST_INT_P (XEXP (op
, 1))
1505 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1506 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1507 GET_MODE_BITSIZE (op_mode
) > INTVAL (XEXP (op
, 1))))
1509 scalar_int_mode tmode
;
1510 gcc_assert (GET_MODE_BITSIZE (int_mode
)
1511 > GET_MODE_BITSIZE (op_mode
));
1512 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode
)
1513 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1516 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1518 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1519 ? SIGN_EXTEND
: ZERO_EXTEND
,
1520 int_mode
, inner
, tmode
);
1524 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1525 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1526 if (GET_CODE (op
) == LSHIFTRT
1527 && CONST_INT_P (XEXP (op
, 1))
1528 && XEXP (op
, 1) != const0_rtx
)
1529 return simplify_gen_unary (ZERO_EXTEND
, mode
, op
, GET_MODE (op
));
1531 #if defined(POINTERS_EXTEND_UNSIGNED)
1532 /* As we do not know which address space the pointer is referring to,
1533 we can do this only if the target does not support different pointer
1534 or address modes depending on the address space. */
1535 if (target_default_pointer_address_modes_p ()
1536 && ! POINTERS_EXTEND_UNSIGNED
1537 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1539 || (GET_CODE (op
) == SUBREG
1540 && REG_P (SUBREG_REG (op
))
1541 && REG_POINTER (SUBREG_REG (op
))
1542 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1543 && !targetm
.have_ptr_extend ())
1546 = convert_memory_address_addr_space_1 (Pmode
, op
,
1547 ADDR_SPACE_GENERIC
, false,
1556 /* Check for a zero extension of a subreg of a promoted
1557 variable, where the promotion is zero-extended, and the
1558 target mode is the same as the variable's promotion. */
1559 if (GET_CODE (op
) == SUBREG
1560 && SUBREG_PROMOTED_VAR_P (op
)
1561 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1562 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1564 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, SUBREG_REG (op
));
1569 /* Extending a widening multiplication should be canonicalized to
1570 a wider widening multiplication. */
1571 if (GET_CODE (op
) == MULT
)
1573 rtx lhs
= XEXP (op
, 0);
1574 rtx rhs
= XEXP (op
, 1);
1575 enum rtx_code lcode
= GET_CODE (lhs
);
1576 enum rtx_code rcode
= GET_CODE (rhs
);
1578 /* Widening multiplies usually extend both operands, but sometimes
1579 they use a shift to extract a portion of a register. */
1580 if ((lcode
== ZERO_EXTEND
1581 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1582 && (rcode
== ZERO_EXTEND
1583 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1585 machine_mode lmode
= GET_MODE (lhs
);
1586 machine_mode rmode
= GET_MODE (rhs
);
1589 if (lcode
== LSHIFTRT
)
1590 /* Number of bits not shifted off the end. */
1591 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1592 - INTVAL (XEXP (lhs
, 1)));
1593 else /* lcode == ZERO_EXTEND */
1594 /* Size of inner mode. */
1595 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1597 if (rcode
== LSHIFTRT
)
1598 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1599 - INTVAL (XEXP (rhs
, 1)));
1600 else /* rcode == ZERO_EXTEND */
1601 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1603 /* We can only widen multiplies if the result is mathematiclly
1604 equivalent. I.e. if overflow was impossible. */
1605 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1606 return simplify_gen_binary
1608 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1609 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1613 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1614 if (GET_CODE (op
) == ZERO_EXTEND
)
1615 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1616 GET_MODE (XEXP (op
, 0)));
1618 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1619 is (zero_extend:M (subreg:O <X>)) if there is mode with
1620 GET_MODE_PRECISION (N) - I bits. */
1621 if (GET_CODE (op
) == LSHIFTRT
1622 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1623 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1624 && CONST_INT_P (XEXP (op
, 1))
1625 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1626 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1627 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1629 scalar_int_mode tmode
;
1630 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1631 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1634 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1636 return simplify_gen_unary (ZERO_EXTEND
, int_mode
,
1641 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1642 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1644 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1645 (and:SI (reg:SI) (const_int 63)). */
1646 if (partial_subreg_p (op
)
1647 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1648 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &op0_mode
)
1649 && GET_MODE_PRECISION (op0_mode
) <= HOST_BITS_PER_WIDE_INT
1650 && GET_MODE_PRECISION (int_mode
) >= GET_MODE_PRECISION (op0_mode
)
1651 && subreg_lowpart_p (op
)
1652 && (nonzero_bits (SUBREG_REG (op
), op0_mode
)
1653 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1655 if (GET_MODE_PRECISION (int_mode
) == GET_MODE_PRECISION (op0_mode
))
1656 return SUBREG_REG (op
);
1657 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, SUBREG_REG (op
),
1661 #if defined(POINTERS_EXTEND_UNSIGNED)
1662 /* As we do not know which address space the pointer is referring to,
1663 we can do this only if the target does not support different pointer
1664 or address modes depending on the address space. */
1665 if (target_default_pointer_address_modes_p ()
1666 && POINTERS_EXTEND_UNSIGNED
> 0
1667 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1669 || (GET_CODE (op
) == SUBREG
1670 && REG_P (SUBREG_REG (op
))
1671 && REG_POINTER (SUBREG_REG (op
))
1672 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1673 && !targetm
.have_ptr_extend ())
1676 = convert_memory_address_addr_space_1 (Pmode
, op
,
1677 ADDR_SPACE_GENERIC
, false,
1689 if (VECTOR_MODE_P (mode
)
1690 && vec_duplicate_p (op
, &elt
)
1691 && code
!= VEC_DUPLICATE
)
1693 /* Try applying the operator to ELT and see if that simplifies.
1694 We can duplicate the result if so.
1696 The reason we don't use simplify_gen_unary is that it isn't
1697 necessarily a win to convert things like:
1699 (neg:V (vec_duplicate:V (reg:S R)))
1703 (vec_duplicate:V (neg:S (reg:S R)))
1705 The first might be done entirely in vector registers while the
1706 second might need a move between register files. */
1707 temp
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1708 elt
, GET_MODE_INNER (GET_MODE (op
)));
1710 return gen_vec_duplicate (mode
, temp
);
1716 /* Try to compute the value of a unary operation CODE whose output mode is to
1717 be MODE with input operand OP whose mode was originally OP_MODE.
1718 Return zero if the value cannot be computed. */
1720 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1721 rtx op
, machine_mode op_mode
)
1723 scalar_int_mode result_mode
;
1725 if (code
== VEC_DUPLICATE
)
1727 gcc_assert (VECTOR_MODE_P (mode
));
1728 if (GET_MODE (op
) != VOIDmode
)
1730 if (!VECTOR_MODE_P (GET_MODE (op
)))
1731 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1733 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1736 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
))
1737 return gen_const_vec_duplicate (mode
, op
);
1738 unsigned int n_elts
;
1739 if (GET_CODE (op
) == CONST_VECTOR
1740 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
1742 /* This must be constant if we're duplicating it to a constant
1743 number of elements. */
1744 unsigned int in_n_elts
= CONST_VECTOR_NUNITS (op
).to_constant ();
1745 gcc_assert (in_n_elts
< n_elts
);
1746 gcc_assert ((n_elts
% in_n_elts
) == 0);
1747 rtvec v
= rtvec_alloc (n_elts
);
1748 for (unsigned i
= 0; i
< n_elts
; i
++)
1749 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1750 return gen_rtx_CONST_VECTOR (mode
, v
);
1754 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1756 unsigned int n_elts
;
1757 if (!CONST_VECTOR_NUNITS (op
).is_constant (&n_elts
))
1760 machine_mode opmode
= GET_MODE (op
);
1761 gcc_assert (known_eq (GET_MODE_NUNITS (mode
), n_elts
));
1762 gcc_assert (known_eq (GET_MODE_NUNITS (opmode
), n_elts
));
1764 rtvec v
= rtvec_alloc (n_elts
);
1767 for (i
= 0; i
< n_elts
; i
++)
1769 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1770 CONST_VECTOR_ELT (op
, i
),
1771 GET_MODE_INNER (opmode
));
1772 if (!x
|| !valid_for_const_vector_p (mode
, x
))
1774 RTVEC_ELT (v
, i
) = x
;
1776 return gen_rtx_CONST_VECTOR (mode
, v
);
1779 /* The order of these tests is critical so that, for example, we don't
1780 check the wrong mode (input vs. output) for a conversion operation,
1781 such as FIX. At some point, this should be simplified. */
1783 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1787 if (op_mode
== VOIDmode
)
1789 /* CONST_INT have VOIDmode as the mode. We assume that all
1790 the bits of the constant are significant, though, this is
1791 a dangerous assumption as many times CONST_INTs are
1792 created and used with garbage in the bits outside of the
1793 precision of the implied mode of the const_int. */
1794 op_mode
= MAX_MODE_INT
;
1797 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), SIGNED
);
1799 /* Avoid the folding if flag_signaling_nans is on and
1800 operand is a signaling NaN. */
1801 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1804 d
= real_value_truncate (mode
, d
);
1805 return const_double_from_real_value (d
, mode
);
1807 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1811 if (op_mode
== VOIDmode
)
1813 /* CONST_INT have VOIDmode as the mode. We assume that all
1814 the bits of the constant are significant, though, this is
1815 a dangerous assumption as many times CONST_INTs are
1816 created and used with garbage in the bits outside of the
1817 precision of the implied mode of the const_int. */
1818 op_mode
= MAX_MODE_INT
;
1821 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), UNSIGNED
);
1823 /* Avoid the folding if flag_signaling_nans is on and
1824 operand is a signaling NaN. */
1825 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1828 d
= real_value_truncate (mode
, d
);
1829 return const_double_from_real_value (d
, mode
);
1832 if (CONST_SCALAR_INT_P (op
) && is_a
<scalar_int_mode
> (mode
, &result_mode
))
1834 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1836 scalar_int_mode imode
= (op_mode
== VOIDmode
1838 : as_a
<scalar_int_mode
> (op_mode
));
1839 rtx_mode_t op0
= rtx_mode_t (op
, imode
);
1842 #if TARGET_SUPPORTS_WIDE_INT == 0
1843 /* This assert keeps the simplification from producing a result
1844 that cannot be represented in a CONST_DOUBLE but a lot of
1845 upstream callers expect that this function never fails to
1846 simplify something and so you if you added this to the test
1847 above the code would die later anyway. If this assert
1848 happens, you just need to make the port support wide int. */
1849 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1855 result
= wi::bit_not (op0
);
1859 result
= wi::neg (op0
);
1863 result
= wi::abs (op0
);
1867 result
= wi::shwi (wi::ffs (op0
), result_mode
);
1871 if (wi::ne_p (op0
, 0))
1872 int_value
= wi::clz (op0
);
1873 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1875 result
= wi::shwi (int_value
, result_mode
);
1879 result
= wi::shwi (wi::clrsb (op0
), result_mode
);
1883 if (wi::ne_p (op0
, 0))
1884 int_value
= wi::ctz (op0
);
1885 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1887 result
= wi::shwi (int_value
, result_mode
);
1891 result
= wi::shwi (wi::popcount (op0
), result_mode
);
1895 result
= wi::shwi (wi::parity (op0
), result_mode
);
1899 result
= wide_int (op0
).bswap ();
1904 result
= wide_int::from (op0
, width
, UNSIGNED
);
1908 result
= wide_int::from (op0
, width
, SIGNED
);
1916 return immed_wide_int_const (result
, result_mode
);
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);

      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return const_double_from_real_value (d, mode);
    }
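  /* For illustration: for a DFmode operand, the branch above folds
     (neg:DF (const_double:DF 1.5)) to (const_double:DF -1.5) and
     (abs:DF (const_double:DF -2.0)) to (const_double:DF 2.0).  */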
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }
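  /* For illustration: with an SImode result, (fix:SI (const_double:DF 1e30))
     folds to (const_int 2147483647) because the value exceeds the signed
     upper bound, while (unsigned_fix:SI (const_double:DF -1.0)) folds to
     (const_int 0) because the operand is negative.  */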
  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
	{
	case NEG:
	  result = -const_poly_int_value (op);
	  break;

	case NOT:
	  result = ~const_poly_int_value (op);
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return 0;
}
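/* For illustration: a CONST_POLY_INT represents a runtime-variable value
   such as 4 + 4*N (N being the runtime vector length factor); the NEG case
   above folds it to -4 - 4*N and the NOT case to -5 - 4*N.  */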
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
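/* For illustration: in SImode, the first rule rewrites
   (and:SI (bswap:SI X) (const_int 0xff)) as
   (bswap:SI (and:SI X (const_int 0xff000000))), i.e. the constant is
   byte-swapped so the AND can be applied before the BSWAP.  */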
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
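/* For illustration: with CODE == PLUS, the linearization step rewrites
   (plus (plus a b) (plus c d)) as (plus (plus (plus a b) c) d), after
   which the canonicalization step moves constants to the rightmost
   position so that the other rules can combine them.  */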
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
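/* Typical use (a sketch, not taken from a particular caller): a pass such
   as combine or CSE asks, e.g.,
       rtx tem = simplify_binary_operation (PLUS, SImode, x, const0_rtx);
   and gets back x when the addition of zero is known to be a no-op, or
   NULL_RTX when nothing could be simplified.  */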
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

static rtx
simplify_binary_operation_series (rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
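/* For illustration: adding a VEC_SERIES with base 0 and step 1
   ({0, 1, 2, ...}) to a VEC_DUPLICATE of 10 ({10, 10, 10, ...}) yields a
   new VEC_SERIES with base 10 and step 1 ({10, 11, 12, ...}), provided
   both the base addition and the step addition simplify.  */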
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;
  poly_int64 offset;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
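      /* For illustration: the last rule is the two's-complement identity
	 ~a + 1 == -a, so (plus:SI (not:SI X) (const_int 1)) becomes
	 (neg:SI X) for any integral mode.  */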
2260 /* Handle both-operands-constant cases. We can only add
2261 CONST_INTs to constants since the sum of relocatable symbols
2262 can't be handled by most assemblers. Don't add CONST_INT
2263 to CONST_INT since overflow won't be computed properly if wider
2264 than HOST_BITS_PER_WIDE_INT. */
2266 if ((GET_CODE (op0
) == CONST
2267 || GET_CODE (op0
) == SYMBOL_REF
2268 || GET_CODE (op0
) == LABEL_REF
)
2269 && poly_int_rtx_p (op1
, &offset
))
2270 return plus_constant (mode
, op0
, offset
);
2271 else if ((GET_CODE (op1
) == CONST
2272 || GET_CODE (op1
) == SYMBOL_REF
2273 || GET_CODE (op1
) == LABEL_REF
)
2274 && poly_int_rtx_p (op0
, &offset
))
2275 return plus_constant (mode
, op1
, offset
);
2277 /* See if this is something like X * C - X or vice versa or
2278 if the multiplication is written as a shift. If so, we can
2279 distribute and make a new multiply, shift, or maybe just
2280 have X (if C is 2 in the example above). But don't make
2281 something more expensive than we had before. */
2283 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2285 rtx lhs
= op0
, rhs
= op1
;
2287 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2288 wide_int coeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2290 if (GET_CODE (lhs
) == NEG
)
2292 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2293 lhs
= XEXP (lhs
, 0);
2295 else if (GET_CODE (lhs
) == MULT
2296 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2298 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2299 lhs
= XEXP (lhs
, 0);
2301 else if (GET_CODE (lhs
) == ASHIFT
2302 && CONST_INT_P (XEXP (lhs
, 1))
2303 && INTVAL (XEXP (lhs
, 1)) >= 0
2304 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2306 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2307 GET_MODE_PRECISION (int_mode
));
2308 lhs
= XEXP (lhs
, 0);
2311 if (GET_CODE (rhs
) == NEG
)
2313 coeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2314 rhs
= XEXP (rhs
, 0);
2316 else if (GET_CODE (rhs
) == MULT
2317 && CONST_INT_P (XEXP (rhs
, 1)))
2319 coeff1
= rtx_mode_t (XEXP (rhs
, 1), int_mode
);
2320 rhs
= XEXP (rhs
, 0);
2322 else if (GET_CODE (rhs
) == ASHIFT
2323 && CONST_INT_P (XEXP (rhs
, 1))
2324 && INTVAL (XEXP (rhs
, 1)) >= 0
2325 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2327 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2328 GET_MODE_PRECISION (int_mode
));
2329 rhs
= XEXP (rhs
, 0);
2332 if (rtx_equal_p (lhs
, rhs
))
2334 rtx orig
= gen_rtx_PLUS (int_mode
, op0
, op1
);
2336 bool speed
= optimize_function_for_speed_p (cfun
);
2338 coeff
= immed_wide_int_const (coeff0
+ coeff1
, int_mode
);
2340 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2341 return (set_src_cost (tem
, int_mode
, speed
)
2342 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2346 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2347 if (CONST_SCALAR_INT_P (op1
)
2348 && GET_CODE (op0
) == XOR
2349 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2350 && mode_signbit_p (mode
, op1
))
2351 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2352 simplify_gen_binary (XOR
, mode
, op1
,
2355 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2356 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2357 && GET_CODE (op0
) == MULT
2358 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2362 in1
= XEXP (XEXP (op0
, 0), 0);
2363 in2
= XEXP (op0
, 1);
2364 return simplify_gen_binary (MINUS
, mode
, op1
,
2365 simplify_gen_binary (MULT
, mode
,
2369 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2370 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2372 if (COMPARISON_P (op0
)
2373 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2374 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2375 && (reversed
= reversed_comparison (op0
, mode
)))
2377 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2379 /* If one of the operands is a PLUS or a MINUS, see if we can
2380 simplify this by the associative law.
2381 Don't use the associative law for floating point.
2382 The inaccuracy makes it nonassociative,
2383 and subtle programs can break if operations are associated. */
2385 if (INTEGRAL_MODE_P (mode
)
2386 && (plus_minus_operand_p (op0
)
2387 || plus_minus_operand_p (op1
))
2388 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2391 /* Reassociate floating point addition only when the user
2392 specifies associative math operations. */
2393 if (FLOAT_MODE_P (mode
)
2394 && flag_associative_math
)
2396 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2401 /* Handle vector series. */
2402 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2404 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2411 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2412 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2413 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2414 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2416 rtx xop00
= XEXP (op0
, 0);
2417 rtx xop10
= XEXP (op1
, 0);
2419 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2422 if (REG_P (xop00
) && REG_P (xop10
)
2423 && REGNO (xop00
) == REGNO (xop10
)
2424 && GET_MODE (xop00
) == mode
2425 && GET_MODE (xop10
) == mode
2426 && GET_MODE_CLASS (mode
) == MODE_CC
)
2432 /* We can't assume x-x is 0 even with non-IEEE floating point,
2433 but since it is zero except in very strange circumstances, we
2434 will treat it as zero with -ffinite-math-only. */
2435 if (rtx_equal_p (trueop0
, trueop1
)
2436 && ! side_effects_p (op0
)
2437 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2438 return CONST0_RTX (mode
);
2440 /* Change subtraction from zero into negation. (0 - x) is the
2441 same as -x when x is NaN, infinite, or finite and nonzero.
2442 But if the mode has signed zeros, and does not round towards
2443 -infinity, then 0 - 0 is 0, not -0. */
2444 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2445 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2447 /* (-1 - a) is ~a, unless the expression contains symbolic
2448 constants, in which case not retaining additions and
2449 subtractions could cause invalid assembly to be produced. */
2450 if (trueop0
== constm1_rtx
2451 && !contains_symbolic_reference_p (op1
))
2452 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2454 /* Subtracting 0 has no effect unless the mode has signed zeros
2455 and supports rounding towards -infinity. In such a case,
2457 if (!(HONOR_SIGNED_ZEROS (mode
)
2458 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2459 && trueop1
== CONST0_RTX (mode
))
2462 /* See if this is something like X * C - X or vice versa or
2463 if the multiplication is written as a shift. If so, we can
2464 distribute and make a new multiply, shift, or maybe just
2465 have X (if C is 2 in the example above). But don't make
2466 something more expensive than we had before. */
2468 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2470 rtx lhs
= op0
, rhs
= op1
;
2472 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2473 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2475 if (GET_CODE (lhs
) == NEG
)
2477 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2478 lhs
= XEXP (lhs
, 0);
2480 else if (GET_CODE (lhs
) == MULT
2481 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2483 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2484 lhs
= XEXP (lhs
, 0);
2486 else if (GET_CODE (lhs
) == ASHIFT
2487 && CONST_INT_P (XEXP (lhs
, 1))
2488 && INTVAL (XEXP (lhs
, 1)) >= 0
2489 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2491 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2492 GET_MODE_PRECISION (int_mode
));
2493 lhs
= XEXP (lhs
, 0);
2496 if (GET_CODE (rhs
) == NEG
)
2498 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2499 rhs
= XEXP (rhs
, 0);
2501 else if (GET_CODE (rhs
) == MULT
2502 && CONST_INT_P (XEXP (rhs
, 1)))
2504 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2505 rhs
= XEXP (rhs
, 0);
2507 else if (GET_CODE (rhs
) == ASHIFT
2508 && CONST_INT_P (XEXP (rhs
, 1))
2509 && INTVAL (XEXP (rhs
, 1)) >= 0
2510 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2512 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2513 GET_MODE_PRECISION (int_mode
));
2514 negcoeff1
= -negcoeff1
;
2515 rhs
= XEXP (rhs
, 0);
2518 if (rtx_equal_p (lhs
, rhs
))
2520 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2522 bool speed
= optimize_function_for_speed_p (cfun
);
2524 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2526 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2527 return (set_src_cost (tem
, int_mode
, speed
)
2528 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2532 /* (a - (-b)) -> (a + b). True even for IEEE. */
2533 if (GET_CODE (op1
) == NEG
)
2534 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2536 /* (-x - c) may be simplified as (-c - x). */
2537 if (GET_CODE (op0
) == NEG
2538 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2540 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2542 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2545 if ((GET_CODE (op0
) == CONST
2546 || GET_CODE (op0
) == SYMBOL_REF
2547 || GET_CODE (op0
) == LABEL_REF
)
2548 && poly_int_rtx_p (op1
, &offset
))
2549 return plus_constant (mode
, op0
, trunc_int_for_mode (-offset
, mode
));
2551 /* Don't let a relocatable value get a negative coeff. */
2552 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2553 return simplify_gen_binary (PLUS
, mode
,
2555 neg_const_int (mode
, op1
));
2557 /* (x - (x & y)) -> (x & ~y) */
2558 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2560 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2562 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2563 GET_MODE (XEXP (op1
, 1)));
2564 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2566 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2568 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2569 GET_MODE (XEXP (op1
, 0)));
2570 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2574 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2575 by reversing the comparison code if valid. */
2576 if (STORE_FLAG_VALUE
== 1
2577 && trueop0
== const1_rtx
2578 && COMPARISON_P (op1
)
2579 && (reversed
= reversed_comparison (op1
, mode
)))
2582 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2583 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2584 && GET_CODE (op1
) == MULT
2585 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2589 in1
= XEXP (XEXP (op1
, 0), 0);
2590 in2
= XEXP (op1
, 1);
2591 return simplify_gen_binary (PLUS
, mode
,
2592 simplify_gen_binary (MULT
, mode
,
2597 /* Canonicalize (minus (neg A) (mult B C)) to
2598 (minus (mult (neg B) C) A). */
2599 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2600 && GET_CODE (op1
) == MULT
2601 && GET_CODE (op0
) == NEG
)
2605 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2606 in2
= XEXP (op1
, 1);
2607 return simplify_gen_binary (MINUS
, mode
,
2608 simplify_gen_binary (MULT
, mode
,
2613 /* If one of the operands is a PLUS or a MINUS, see if we can
2614 simplify this by the associative law. This will, for example,
2615 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2616 Don't use the associative law for floating point.
2617 The inaccuracy makes it nonassociative,
2618 and subtle programs can break if operations are associated. */
2620 if (INTEGRAL_MODE_P (mode
)
2621 && (plus_minus_operand_p (op0
)
2622 || plus_minus_operand_p (op1
))
2623 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2626 /* Handle vector series. */
2627 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2629 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2636 if (trueop1
== constm1_rtx
)
2637 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2639 if (GET_CODE (op0
) == NEG
)
2641 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2642 /* If op1 is a MULT as well and simplify_unary_operation
2643 just moved the NEG to the second operand, simplify_gen_binary
2644 below could through simplify_associative_operation move
2645 the NEG around again and recurse endlessly. */
2647 && GET_CODE (op1
) == MULT
2648 && GET_CODE (temp
) == MULT
2649 && XEXP (op1
, 0) == XEXP (temp
, 0)
2650 && GET_CODE (XEXP (temp
, 1)) == NEG
2651 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2654 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2656 if (GET_CODE (op1
) == NEG
)
2658 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2659 /* If op0 is a MULT as well and simplify_unary_operation
2660 just moved the NEG to the second operand, simplify_gen_binary
2661 below could through simplify_associative_operation move
2662 the NEG around again and recurse endlessly. */
2664 && GET_CODE (op0
) == MULT
2665 && GET_CODE (temp
) == MULT
2666 && XEXP (op0
, 0) == XEXP (temp
, 0)
2667 && GET_CODE (XEXP (temp
, 1)) == NEG
2668 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2671 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2674 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2675 x is NaN, since x * 0 is then also NaN. Nor is it valid
2676 when the mode has signed zeros, since multiplying a negative
2677 number by 0 will give -0, not 0. */
2678 if (!HONOR_NANS (mode
)
2679 && !HONOR_SIGNED_ZEROS (mode
)
2680 && trueop1
== CONST0_RTX (mode
)
2681 && ! side_effects_p (op0
))
2684 /* In IEEE floating point, x*1 is not equivalent to x for
2686 if (!HONOR_SNANS (mode
)
2687 && trueop1
== CONST1_RTX (mode
))
2690 /* Convert multiply by constant power of two into shift. */
2691 if (CONST_SCALAR_INT_P (trueop1
))
2693 val
= wi::exact_log2 (rtx_mode_t (trueop1
, mode
));
2695 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2696 gen_int_shift_amount (mode
, val
));
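      /* For illustration: since wi::exact_log2 (8) == 3, the rule above
	 turns (mult:SI X (const_int 8)) into (ashift:SI X (const_int 3)).  */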
2699 /* x*2 is x+x and x*(-1) is -x */
2700 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2701 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2702 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2703 && GET_MODE (op0
) == mode
)
2705 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
2707 if (real_equal (d1
, &dconst2
))
2708 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2710 if (!HONOR_SNANS (mode
)
2711 && real_equal (d1
, &dconstm1
))
2712 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2715 /* Optimize -x * -x as x * x. */
2716 if (FLOAT_MODE_P (mode
)
2717 && GET_CODE (op0
) == NEG
2718 && GET_CODE (op1
) == NEG
2719 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2720 && !side_effects_p (XEXP (op0
, 0)))
2721 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2723 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2724 if (SCALAR_FLOAT_MODE_P (mode
)
2725 && GET_CODE (op0
) == ABS
2726 && GET_CODE (op1
) == ABS
2727 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2728 && !side_effects_p (XEXP (op0
, 0)))
2729 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2731 /* Reassociate multiplication, but for floating point MULTs
2732 only when the user specifies unsafe math optimizations. */
2733 if (! FLOAT_MODE_P (mode
)
2734 || flag_unsafe_math_optimizations
)
2736 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2743 if (trueop1
== CONST0_RTX (mode
))
2745 if (INTEGRAL_MODE_P (mode
)
2746 && trueop1
== CONSTM1_RTX (mode
)
2747 && !side_effects_p (op0
))
2749 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2751 /* A | (~A) -> -1 */
2752 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2753 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2754 && ! side_effects_p (op0
)
2755 && SCALAR_INT_MODE_P (mode
))
2758 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2759 if (CONST_INT_P (op1
)
2760 && HWI_COMPUTABLE_MODE_P (mode
)
2761 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2762 && !side_effects_p (op0
))
2765 /* Canonicalize (X & C1) | C2. */
2766 if (GET_CODE (op0
) == AND
2767 && CONST_INT_P (trueop1
)
2768 && CONST_INT_P (XEXP (op0
, 1)))
2770 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2771 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2772 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2774 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2776 && !side_effects_p (XEXP (op0
, 0)))
2779 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2780 if (((c1
|c2
) & mask
) == mask
)
2781 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2784 /* Convert (A & B) | A to A. */
2785 if (GET_CODE (op0
) == AND
2786 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2787 || rtx_equal_p (XEXP (op0
, 1), op1
))
2788 && ! side_effects_p (XEXP (op0
, 0))
2789 && ! side_effects_p (XEXP (op0
, 1)))
2792 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2793 mode size to (rotate A CX). */
2795 if (GET_CODE (op1
) == ASHIFT
2796 || GET_CODE (op1
) == SUBREG
)
2807 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2808 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2809 && CONST_INT_P (XEXP (opleft
, 1))
2810 && CONST_INT_P (XEXP (opright
, 1))
2811 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2812 == GET_MODE_UNIT_PRECISION (mode
)))
2813 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2815 /* Same, but for ashift that has been "simplified" to a wider mode
2816 by simplify_shift_const. */
2818 if (GET_CODE (opleft
) == SUBREG
2819 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
2820 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (opleft
)),
2822 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2823 && GET_CODE (opright
) == LSHIFTRT
2824 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2825 && known_eq (SUBREG_BYTE (opleft
), SUBREG_BYTE (XEXP (opright
, 0)))
2826 && GET_MODE_SIZE (int_mode
) < GET_MODE_SIZE (inner_mode
)
2827 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2828 SUBREG_REG (XEXP (opright
, 0)))
2829 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2830 && CONST_INT_P (XEXP (opright
, 1))
2831 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1))
2832 + INTVAL (XEXP (opright
, 1))
2833 == GET_MODE_PRECISION (int_mode
)))
2834 return gen_rtx_ROTATE (int_mode
, XEXP (opright
, 0),
2835 XEXP (SUBREG_REG (opleft
), 1));
2837 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2838 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2839 the PLUS does not affect any of the bits in OP1: then we can do
2840 the IOR as a PLUS and we can associate. This is valid if OP1
2841 can be safely shifted left C bits. */
2842 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2843 && GET_CODE (XEXP (op0
, 0)) == PLUS
2844 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2845 && CONST_INT_P (XEXP (op0
, 1))
2846 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2848 int count
= INTVAL (XEXP (op0
, 1));
2849 HOST_WIDE_INT mask
= UINTVAL (trueop1
) << count
;
2851 if (mask
>> count
== INTVAL (trueop1
)
2852 && trunc_int_for_mode (mask
, mode
) == mask
2853 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2854 return simplify_gen_binary (ASHIFTRT
, mode
,
2855 plus_constant (mode
, XEXP (op0
, 0),
2860 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2864 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2870 if (trueop1
== CONST0_RTX (mode
))
2872 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2873 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2874 if (rtx_equal_p (trueop0
, trueop1
)
2875 && ! side_effects_p (op0
)
2876 && GET_MODE_CLASS (mode
) != MODE_CC
)
2877 return CONST0_RTX (mode
);
2879 /* Canonicalize XOR of the most significant bit to PLUS. */
2880 if (CONST_SCALAR_INT_P (op1
)
2881 && mode_signbit_p (mode
, op1
))
2882 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2883 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2884 if (CONST_SCALAR_INT_P (op1
)
2885 && GET_CODE (op0
) == PLUS
2886 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2887 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2888 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2889 simplify_gen_binary (XOR
, mode
, op1
,
2892 /* If we are XORing two things that have no bits in common,
2893 convert them into an IOR. This helps to detect rotation encoded
2894 using those methods and possibly other simplifications. */
2896 if (HWI_COMPUTABLE_MODE_P (mode
)
2897 && (nonzero_bits (op0
, mode
)
2898 & nonzero_bits (op1
, mode
)) == 0)
2899 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2901 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2902 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2905 int num_negated
= 0;
2907 if (GET_CODE (op0
) == NOT
)
2908 num_negated
++, op0
= XEXP (op0
, 0);
2909 if (GET_CODE (op1
) == NOT
)
2910 num_negated
++, op1
= XEXP (op1
, 0);
2912 if (num_negated
== 2)
2913 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2914 else if (num_negated
== 1)
2915 return simplify_gen_unary (NOT
, mode
,
2916 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2920 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2921 correspond to a machine insn or result in further simplifications
2922 if B is a constant. */
2924 if (GET_CODE (op0
) == AND
2925 && rtx_equal_p (XEXP (op0
, 1), op1
)
2926 && ! side_effects_p (op1
))
2927 return simplify_gen_binary (AND
, mode
,
2928 simplify_gen_unary (NOT
, mode
,
2929 XEXP (op0
, 0), mode
),
2932 else if (GET_CODE (op0
) == AND
2933 && rtx_equal_p (XEXP (op0
, 0), op1
)
2934 && ! side_effects_p (op1
))
2935 return simplify_gen_binary (AND
, mode
,
2936 simplify_gen_unary (NOT
, mode
,
2937 XEXP (op0
, 1), mode
),
2940 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2941 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2942 out bits inverted twice and not set by C. Similarly, given
2943 (xor (and (xor A B) C) D), simplify without inverting C in
2944 the xor operand: (xor (and A C) (B&C)^D).
2946 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2947 && GET_CODE (XEXP (op0
, 0)) == XOR
2948 && CONST_INT_P (op1
)
2949 && CONST_INT_P (XEXP (op0
, 1))
2950 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2952 enum rtx_code op
= GET_CODE (op0
);
2953 rtx a
= XEXP (XEXP (op0
, 0), 0);
2954 rtx b
= XEXP (XEXP (op0
, 0), 1);
2955 rtx c
= XEXP (op0
, 1);
2957 HOST_WIDE_INT bval
= INTVAL (b
);
2958 HOST_WIDE_INT cval
= INTVAL (c
);
2959 HOST_WIDE_INT dval
= INTVAL (d
);
2960 HOST_WIDE_INT xcval
;
2967 return simplify_gen_binary (XOR
, mode
,
2968 simplify_gen_binary (op
, mode
, a
, c
),
2969 gen_int_mode ((bval
& xcval
) ^ dval
,
2973 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2974 we can transform like this:
2975 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2976 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2977 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2978 Attempt a few simplifications when B and C are both constants. */
2979 if (GET_CODE (op0
) == AND
2980 && CONST_INT_P (op1
)
2981 && CONST_INT_P (XEXP (op0
, 1)))
2983 rtx a
= XEXP (op0
, 0);
2984 rtx b
= XEXP (op0
, 1);
2986 HOST_WIDE_INT bval
= INTVAL (b
);
2987 HOST_WIDE_INT cval
= INTVAL (c
);
2989 /* Instead of computing ~A&C, we compute its negated value,
2990 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2991 optimize for sure. If it does not simplify, we still try
2992 to compute ~A&C below, but since that always allocates
2993 RTL, we don't try that before committing to returning a
2994 simplified expression. */
2995 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
2998 if ((~cval
& bval
) == 0)
3000 rtx na_c
= NULL_RTX
;
3002 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
3005 /* If ~A does not simplify, don't bother: we don't
3006 want to simplify 2 operations into 3, and if na_c
3007 were to simplify with na, n_na_c would have
3008 simplified as well. */
3009 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
3011 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
3014 /* Try to simplify ~A&C | ~B&C. */
3015 if (na_c
!= NULL_RTX
)
3016 return simplify_gen_binary (IOR
, mode
, na_c
,
3017 gen_int_mode (~bval
& cval
, mode
));
3021 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3022 if (n_na_c
== CONSTM1_RTX (mode
))
3024 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
3025 gen_int_mode (~cval
& bval
,
3027 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
3028 gen_int_mode (~bval
& cval
,
3034 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3035 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3036 machines, and also has shorter instruction path length. */
3037 if (GET_CODE (op0
) == AND
3038 && GET_CODE (XEXP (op0
, 0)) == XOR
3039 && CONST_INT_P (XEXP (op0
, 1))
3040 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), trueop1
))
3043 rtx b
= XEXP (XEXP (op0
, 0), 1);
3044 rtx c
= XEXP (op0
, 1);
3045 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3046 rtx a_nc
= simplify_gen_binary (AND
, mode
, a
, nc
);
3047 rtx bc
= simplify_gen_binary (AND
, mode
, b
, c
);
3048 return simplify_gen_binary (IOR
, mode
, a_nc
, bc
);
3050 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3051 else if (GET_CODE (op0
) == AND
3052 && GET_CODE (XEXP (op0
, 0)) == XOR
3053 && CONST_INT_P (XEXP (op0
, 1))
3054 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), trueop1
))
3056 rtx a
= XEXP (XEXP (op0
, 0), 0);
3058 rtx c
= XEXP (op0
, 1);
3059 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3060 rtx b_nc
= simplify_gen_binary (AND
, mode
, b
, nc
);
3061 rtx ac
= simplify_gen_binary (AND
, mode
, a
, c
);
3062 return simplify_gen_binary (IOR
, mode
, ac
, b_nc
);
3065 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3066 comparison if STORE_FLAG_VALUE is 1. */
3067 if (STORE_FLAG_VALUE
== 1
3068 && trueop1
== const1_rtx
3069 && COMPARISON_P (op0
)
3070 && (reversed
= reversed_comparison (op0
, mode
)))
3073 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3074 is (lt foo (const_int 0)), so we can perform the above
3075 simplification if STORE_FLAG_VALUE is 1. */
3077 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3078 && STORE_FLAG_VALUE
== 1
3079 && trueop1
== const1_rtx
3080 && GET_CODE (op0
) == LSHIFTRT
3081 && CONST_INT_P (XEXP (op0
, 1))
3082 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
3083 return gen_rtx_GE (int_mode
, XEXP (op0
, 0), const0_rtx
);
3085 /* (xor (comparison foo bar) (const_int sign-bit))
3086 when STORE_FLAG_VALUE is the sign bit. */
3087 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3088 && val_signbit_p (int_mode
, STORE_FLAG_VALUE
)
3089 && trueop1
== const_true_rtx
3090 && COMPARISON_P (op0
)
3091 && (reversed
= reversed_comparison (op0
, int_mode
)))
3094 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3098 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3104 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3106 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3108 if (HWI_COMPUTABLE_MODE_P (mode
))
3110 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3111 HOST_WIDE_INT nzop1
;
3112 if (CONST_INT_P (trueop1
))
3114 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3115 /* If we are turning off bits already known off in OP0, we need
3117 if ((nzop0
& ~val1
) == 0)
3120 nzop1
= nonzero_bits (trueop1
, mode
);
3121 /* If we are clearing all the nonzero bits, the result is zero. */
3122 if ((nzop1
& nzop0
) == 0
3123 && !side_effects_p (op0
) && !side_effects_p (op1
))
3124 return CONST0_RTX (mode
);
3126 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3127 && GET_MODE_CLASS (mode
) != MODE_CC
)
3130 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3131 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3132 && ! side_effects_p (op0
)
3133 && GET_MODE_CLASS (mode
) != MODE_CC
)
3134 return CONST0_RTX (mode
);
3136 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3137 there are no nonzero bits of C outside of X's mode. */
3138 if ((GET_CODE (op0
) == SIGN_EXTEND
3139 || GET_CODE (op0
) == ZERO_EXTEND
)
3140 && CONST_INT_P (trueop1
)
3141 && HWI_COMPUTABLE_MODE_P (mode
)
3142 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3143 & UINTVAL (trueop1
)) == 0)
3145 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3146 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3147 gen_int_mode (INTVAL (trueop1
),
3149 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3152 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3153 we might be able to further simplify the AND with X and potentially
3154 remove the truncation altogether. */
3155 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3157 rtx x
= XEXP (op0
, 0);
3158 machine_mode xmode
= GET_MODE (x
);
3159 tem
= simplify_gen_binary (AND
, xmode
, x
,
3160 gen_int_mode (INTVAL (trueop1
), xmode
));
3161 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3164 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3165 if (GET_CODE (op0
) == IOR
3166 && CONST_INT_P (trueop1
)
3167 && CONST_INT_P (XEXP (op0
, 1)))
3169 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3170 return simplify_gen_binary (IOR
, mode
,
3171 simplify_gen_binary (AND
, mode
,
3172 XEXP (op0
, 0), op1
),
3173 gen_int_mode (tmp
, mode
));
3176 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3177 insn (and may simplify more). */
3178 if (GET_CODE (op0
) == XOR
3179 && rtx_equal_p (XEXP (op0
, 0), op1
)
3180 && ! side_effects_p (op1
))
3181 return simplify_gen_binary (AND
, mode
,
3182 simplify_gen_unary (NOT
, mode
,
3183 XEXP (op0
, 1), mode
),
3186 if (GET_CODE (op0
) == XOR
3187 && rtx_equal_p (XEXP (op0
, 1), op1
)
3188 && ! side_effects_p (op1
))
3189 return simplify_gen_binary (AND
, mode
,
3190 simplify_gen_unary (NOT
, mode
,
3191 XEXP (op0
, 0), mode
),
3194 /* Similarly for (~(A ^ B)) & A. */
3195 if (GET_CODE (op0
) == NOT
3196 && GET_CODE (XEXP (op0
, 0)) == XOR
3197 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3198 && ! side_effects_p (op1
))
3199 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3201 if (GET_CODE (op0
) == NOT
3202 && GET_CODE (XEXP (op0
, 0)) == XOR
3203 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3204 && ! side_effects_p (op1
))
3205 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3207 /* Convert (A | B) & A to A. */
3208 if (GET_CODE (op0
) == IOR
3209 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3210 || rtx_equal_p (XEXP (op0
, 1), op1
))
3211 && ! side_effects_p (XEXP (op0
, 0))
3212 && ! side_effects_p (XEXP (op0
, 1)))
3215 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3216 ((A & N) + B) & M -> (A + B) & M
3217 Similarly if (N & M) == 0,
3218 ((A | N) + B) & M -> (A + B) & M
3219 and for - instead of + and/or ^ instead of |.
3220 Also, if (N & M) == 0, then
3221 (A +- N) & M -> A & M. */
3222 if (CONST_INT_P (trueop1
)
3223 && HWI_COMPUTABLE_MODE_P (mode
)
3224 && ~UINTVAL (trueop1
)
3225 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3226 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3231 pmop
[0] = XEXP (op0
, 0);
3232 pmop
[1] = XEXP (op0
, 1);
3234 if (CONST_INT_P (pmop
[1])
3235 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3236 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3238 for (which
= 0; which
< 2; which
++)
3241 switch (GET_CODE (tem
))
3244 if (CONST_INT_P (XEXP (tem
, 1))
3245 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3246 == UINTVAL (trueop1
))
3247 pmop
[which
] = XEXP (tem
, 0);
3251 if (CONST_INT_P (XEXP (tem
, 1))
3252 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3253 pmop
[which
] = XEXP (tem
, 0);
3260 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3262 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3264 return simplify_gen_binary (code
, mode
, tem
, op1
);
3268 /* (and X (ior (not X) Y) -> (and X Y) */
3269 if (GET_CODE (op1
) == IOR
3270 && GET_CODE (XEXP (op1
, 0)) == NOT
3271 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3272 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3274 /* (and (ior (not X) Y) X) -> (and X Y) */
3275 if (GET_CODE (op0
) == IOR
3276 && GET_CODE (XEXP (op0
, 0)) == NOT
3277 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3278 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3280 /* (and X (ior Y (not X)) -> (and X Y) */
3281 if (GET_CODE (op1
) == IOR
3282 && GET_CODE (XEXP (op1
, 1)) == NOT
3283 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3284 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3286 /* (and (ior Y (not X)) X) -> (and X Y) */
3287 if (GET_CODE (op0
) == IOR
3288 && GET_CODE (XEXP (op0
, 1)) == NOT
3289 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3290 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3292 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3296 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3302 /* 0/x is 0 (or x&0 if x has side-effects). */
3303 if (trueop0
== CONST0_RTX (mode
)
3304 && !cfun
->can_throw_non_call_exceptions
)
3306 if (side_effects_p (op1
))
3307 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3311 if (trueop1
== CONST1_RTX (mode
))
3313 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3317 /* Convert divide by power of two into shift. */
3318 if (CONST_INT_P (trueop1
)
3319 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3320 return simplify_gen_binary (LSHIFTRT
, mode
, op0
,
3321 gen_int_shift_amount (mode
, val
));
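      /* For illustration: since exact_log2 (16) == 4, the rule above turns
	 (udiv:SI X (const_int 16)) into (lshiftrt:SI X (const_int 4)).  */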
3325 /* Handle floating point and integers separately. */
3326 if (SCALAR_FLOAT_MODE_P (mode
))
3328 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3329 safe for modes with NaNs, since 0.0 / 0.0 will then be
3330 NaN rather than 0.0. Nor is it safe for modes with signed
3331 zeros, since dividing 0 by a negative number gives -0.0 */
3332 if (trueop0
== CONST0_RTX (mode
)
3333 && !HONOR_NANS (mode
)
3334 && !HONOR_SIGNED_ZEROS (mode
)
3335 && ! side_effects_p (op1
))
3338 if (trueop1
== CONST1_RTX (mode
)
3339 && !HONOR_SNANS (mode
))
3342 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3343 && trueop1
!= CONST0_RTX (mode
))
3345 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3348 if (real_equal (d1
, &dconstm1
)
3349 && !HONOR_SNANS (mode
))
3350 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3352 /* Change FP division by a constant into multiplication.
3353 Only do this with -freciprocal-math. */
3354 if (flag_reciprocal_math
3355 && !real_equal (d1
, &dconst0
))
3358 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3359 tem
= const_double_from_real_value (d
, mode
);
3360 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3364 else if (SCALAR_INT_MODE_P (mode
))
3366 /* 0/x is 0 (or x&0 if x has side-effects). */
3367 if (trueop0
== CONST0_RTX (mode
)
3368 && !cfun
->can_throw_non_call_exceptions
)
3370 if (side_effects_p (op1
))
3371 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3375 if (trueop1
== CONST1_RTX (mode
))
3377 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3382 if (trueop1
== constm1_rtx
)
3384 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3386 return simplify_gen_unary (NEG
, mode
, x
, mode
);
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (UINTVAL (trueop1) - 1,
						  mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
3433 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3434 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3435 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3437 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3438 if (CONST_INT_P (trueop1
)
3439 && IN_RANGE (INTVAL (trueop1
),
3440 GET_MODE_UNIT_PRECISION (mode
) / 2 + (code
== ROTATE
),
3441 GET_MODE_UNIT_PRECISION (mode
) - 1))
3443 int new_amount
= GET_MODE_UNIT_PRECISION (mode
) - INTVAL (trueop1
);
3444 rtx new_amount_rtx
= gen_int_shift_amount (mode
, new_amount
);
3445 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3446 mode
, op0
, new_amount_rtx
);
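      /* For illustration: on a target with both rotate and rotatert
	 patterns, a 32-bit (rotate:SI X (const_int 31)) is canonicalized
	 above to (rotatert:SI X (const_int 1)), keeping constant rotate
	 counts in the lower half of the range.  */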
3451 if (trueop1
== CONST0_RTX (mode
))
3453 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3455 /* Rotating ~0 always results in ~0. */
3456 if (CONST_INT_P (trueop0
)
3457 && HWI_COMPUTABLE_MODE_P (mode
)
3458 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3459 && ! side_effects_p (op1
))
3465 scalar constants c1, c2
3466 size (M2) > size (M1)
3467 c1 == size (M2) - size (M1)
3469 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3473 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3475 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
3476 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3478 && CONST_INT_P (op1
)
3479 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3480 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
3482 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3483 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
3484 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3485 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
3486 && subreg_lowpart_p (op0
))
3488 rtx tmp
= gen_int_shift_amount
3489 (inner_mode
, INTVAL (XEXP (SUBREG_REG (op0
), 1)) + INTVAL (op1
));
3490 tmp
= simplify_gen_binary (code
, inner_mode
,
3491 XEXP (SUBREG_REG (op0
), 0),
3493 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
3496 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3498 val
= INTVAL (op1
) & (GET_MODE_UNIT_PRECISION (mode
) - 1);
3499 if (val
!= INTVAL (op1
))
3500 return simplify_gen_binary (code
, mode
, op0
,
3501 gen_int_shift_amount (mode
, val
));
3508 if (trueop1
== CONST0_RTX (mode
))
3510 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3512 goto canonicalize_shift
;
3515 if (trueop1
== CONST0_RTX (mode
))
3517 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3519 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3520 if (GET_CODE (op0
) == CLZ
3521 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op0
, 0)), &inner_mode
)
3522 && CONST_INT_P (trueop1
)
3523 && STORE_FLAG_VALUE
== 1
3524 && INTVAL (trueop1
) < GET_MODE_UNIT_PRECISION (mode
))
3526 unsigned HOST_WIDE_INT zero_val
= 0;
3528 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode
, zero_val
)
3529 && zero_val
== GET_MODE_PRECISION (inner_mode
)
3530 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3531 return simplify_gen_relational (EQ
, mode
, inner_mode
,
3532 XEXP (op0
, 0), const0_rtx
);
3534 goto canonicalize_shift
;
3537 if (HWI_COMPUTABLE_MODE_P (mode
)
3538 && mode_signbit_p (mode
, trueop1
)
3539 && ! side_effects_p (op0
))
3541 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3543 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3549 if (HWI_COMPUTABLE_MODE_P (mode
)
3550 && CONST_INT_P (trueop1
)
3551 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3552 && ! side_effects_p (op0
))
3554 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3556 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3562 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3564 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3566 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3572 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3574 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3576 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3589 /* ??? There are simplifications that can be done. */
3593 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
3594 return gen_vec_duplicate (mode
, op0
);
3595 if (valid_for_const_vector_p (mode
, op0
)
3596 && valid_for_const_vector_p (mode
, op1
))
3597 return gen_const_vec_series (mode
, op0
, op1
);
3601 if (!VECTOR_MODE_P (mode
))
3603 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3604 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3605 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3606 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3607 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3609 if (vec_duplicate_p (trueop0
, &elt0
))
3612 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3613 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3616 /* Extract a scalar element from a nested VEC_SELECT expression
3617 (with optional nested VEC_CONCAT expression). Some targets
3618 (i386) extract scalar element from a vector using chain of
3619 nested VEC_SELECT expressions. When input operand is a memory
3620 operand, this operation can be simplified to a simple scalar
3621 load from an offseted memory address. */
3623 if (GET_CODE (trueop0
) == VEC_SELECT
3624 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3625 .is_constant (&n_elts
)))
3627 rtx op0
= XEXP (trueop0
, 0);
3628 rtx op1
= XEXP (trueop0
, 1);
3630 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3636 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3637 gcc_assert (i
< n_elts
);
3639 /* Select element, pointed by nested selector. */
3640 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3642 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3643 if (GET_CODE (op0
) == VEC_CONCAT
)
3645 rtx op00
= XEXP (op0
, 0);
3646 rtx op01
= XEXP (op0
, 1);
3648 machine_mode mode00
, mode01
;
3649 int n_elts00
, n_elts01
;
3651 mode00
= GET_MODE (op00
);
3652 mode01
= GET_MODE (op01
);
3654 /* Find out the number of elements of each operand.
3655 Since the concatenated result has a constant number
3656 of elements, the operands must too. */
3657 n_elts00
= GET_MODE_NUNITS (mode00
).to_constant ();
3658 n_elts01
= GET_MODE_NUNITS (mode01
).to_constant ();
3660 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3662 /* Select correct operand of VEC_CONCAT
3663 and adjust selector. */
3664 if (elem
< n_elts01
)
3675 vec
= rtvec_alloc (1);
3676 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3678 tmp
= gen_rtx_fmt_ee (code
, mode
,
3679 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3685 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3686 gcc_assert (GET_MODE_INNER (mode
)
3687 == GET_MODE_INNER (GET_MODE (trueop0
)));
3688 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3690 if (vec_duplicate_p (trueop0
, &elt0
))
3691 /* It doesn't matter which elements are selected by trueop1,
3692 because they are all the same. */
3693 return gen_vec_duplicate (mode
, elt0
);
3695 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3697 unsigned n_elts
= XVECLEN (trueop1
, 0);
3698 rtvec v
= rtvec_alloc (n_elts
);
3701 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
3702 for (i
= 0; i
< n_elts
; i
++)
3704 rtx x
= XVECEXP (trueop1
, 0, i
);
3706 gcc_assert (CONST_INT_P (x
));
3707 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3711 return gen_rtx_CONST_VECTOR (mode
, v
);
3714 /* Recognize the identity. */
3715 if (GET_MODE (trueop0
) == mode
)
3717 bool maybe_ident
= true;
3718 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3720 rtx j
= XVECEXP (trueop1
, 0, i
);
3721 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3723 maybe_ident
= false;
3731 /* If we build {a,b} then permute it, build the result directly. */
3732 if (XVECLEN (trueop1
, 0) == 2
3733 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3734 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3735 && GET_CODE (trueop0
) == VEC_CONCAT
3736 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3737 && GET_MODE (XEXP (trueop0
, 0)) == mode
3738 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3739 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3741 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3742 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3745 gcc_assert (i0
< 4 && i1
< 4);
3746 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3747 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3749 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3752 if (XVECLEN (trueop1
, 0) == 2
3753 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3754 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3755 && GET_CODE (trueop0
) == VEC_CONCAT
3756 && GET_MODE (trueop0
) == mode
)
3758 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3759 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3762 gcc_assert (i0
< 2 && i1
< 2);
3763 subop0
= XEXP (trueop0
, i0
);
3764 subop1
= XEXP (trueop0
, i1
);
3766 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3769 /* If we select one half of a vec_concat, return that. */
3771 if (GET_CODE (trueop0
) == VEC_CONCAT
3772 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3774 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 1)))
3776 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3778 rtx subop0
= XEXP (trueop0
, 0);
3779 rtx subop1
= XEXP (trueop0
, 1);
3780 machine_mode mode0
= GET_MODE (subop0
);
3781 machine_mode mode1
= GET_MODE (subop1
);
3782 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3783 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3785 bool success
= true;
3786 for (int i
= 1; i
< l0
; ++i
)
3788 rtx j
= XVECEXP (trueop1
, 0, i
);
3789 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3798 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3800 bool success
= true;
3801 for (int i
= 1; i
< l1
; ++i
)
3803 rtx j
= XVECEXP (trueop1
, 0, i
);
3804 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3816 if (XVECLEN (trueop1
, 0) == 1
3817 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3818 && GET_CODE (trueop0
) == VEC_CONCAT
)
3821 offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3823 /* Try to find the element in the VEC_CONCAT. */
3824 while (GET_MODE (vec
) != mode
3825 && GET_CODE (vec
) == VEC_CONCAT
)
3827 poly_int64 vec_size
;
3829 if (CONST_INT_P (XEXP (vec
, 0)))
3831 /* vec_concat of two const_ints doesn't make sense with
3832 respect to modes. */
3833 if (CONST_INT_P (XEXP (vec
, 1)))
3836 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3837 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3840 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3842 if (known_lt (offset
, vec_size
))
3843 vec
= XEXP (vec
, 0);
3844 else if (known_ge (offset
, vec_size
))
3847 vec
= XEXP (vec
, 1);
3851 vec
= avoid_constant_pool_reference (vec
);
3854 if (GET_MODE (vec
) == mode
)
3858 /* If we select elements in a vec_merge that all come from the same
3859 operand, select from that operand directly. */
3860 if (GET_CODE (op0
) == VEC_MERGE
)
3862 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3863 if (CONST_INT_P (trueop02
))
3865 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3866 bool all_operand0
= true;
3867 bool all_operand1
= true;
3868 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3870 rtx j
= XVECEXP (trueop1
, 0, i
);
3871 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
3872 all_operand1
= false;
3874 all_operand0
= false;
3876 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3877 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3878 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3879 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
/* If we have two nested selects that are inverses of each
   other, replace them with the source operand.  */
3885 if (GET_CODE (trueop0
) == VEC_SELECT
3886 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3888 rtx op0_subop1
= XEXP (trueop0
, 1);
3889 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3890 gcc_assert (known_eq (XVECLEN (trueop1
, 0), GET_MODE_NUNITS (mode
)));
/* Apply the outer ordering vector to the inner one.  (The inner
   ordering vector is expressly permitted to be of a different
   length than the outer one.)  If the result is { 0, 1, ..., n-1 }
   then the two VEC_SELECTs cancel.  */
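/* For example, an outer (parallel [1 0]) applied to an inner
   (parallel [1 0]) yields the identity permutation { 0, 1 }, so the
   nested VEC_SELECTs collapse to the innermost source vector.  */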
3896 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3898 rtx x
= XVECEXP (trueop1
, 0, i
);
3899 if (!CONST_INT_P (x
))
3901 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3902 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3905 return XEXP (trueop0
, 0);
3911 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3912 ? GET_MODE (trueop0
)
3913 : GET_MODE_INNER (mode
));
3914 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3915 ? GET_MODE (trueop1
)
3916 : GET_MODE_INNER (mode
));
3918 gcc_assert (VECTOR_MODE_P (mode
));
3919 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode
)
3920 + GET_MODE_SIZE (op1_mode
),
3921 GET_MODE_SIZE (mode
)));
3923 if (VECTOR_MODE_P (op0_mode
))
3924 gcc_assert (GET_MODE_INNER (mode
)
3925 == GET_MODE_INNER (op0_mode
));
3927 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3929 if (VECTOR_MODE_P (op1_mode
))
3930 gcc_assert (GET_MODE_INNER (mode
)
3931 == GET_MODE_INNER (op1_mode
));
3933 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3935 unsigned int n_elts
, in_n_elts
;
3936 if ((GET_CODE (trueop0
) == CONST_VECTOR
3937 || CONST_SCALAR_INT_P (trueop0
)
3938 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3939 && (GET_CODE (trueop1
) == CONST_VECTOR
3940 || CONST_SCALAR_INT_P (trueop1
)
3941 || CONST_DOUBLE_AS_FLOAT_P (trueop1
))
3942 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
)
3943 && GET_MODE_NUNITS (op0_mode
).is_constant (&in_n_elts
))
3945 rtvec v
= rtvec_alloc (n_elts
);
3947 for (i
= 0; i
< n_elts
; i
++)
3951 if (!VECTOR_MODE_P (op0_mode
))
3952 RTVEC_ELT (v
, i
) = trueop0
;
3954 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3958 if (!VECTOR_MODE_P (op1_mode
))
3959 RTVEC_ELT (v
, i
) = trueop1
;
3961 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3966 return gen_rtx_CONST_VECTOR (mode
, v
);
/* Try to merge two VEC_SELECTs from the same vector into a single one.
   Restrict the transformation to avoid generating a VEC_SELECT with a
   mode unrelated to its operand.  */
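/* Illustrative example:
     (vec_concat (vec_select X (parallel [0 1]))
		 (vec_select X (parallel [3 2])))
   becomes (vec_select X (parallel [0 1 3 2])) when X already has the
   result mode.  */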
3972 if (GET_CODE (trueop0
) == VEC_SELECT
3973 && GET_CODE (trueop1
) == VEC_SELECT
3974 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3975 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3977 rtx par0
= XEXP (trueop0
, 1);
3978 rtx par1
= XEXP (trueop1
, 1);
3979 int len0
= XVECLEN (par0
, 0);
3980 int len1
= XVECLEN (par1
, 0);
3981 rtvec vec
= rtvec_alloc (len0
+ len1
);
3982 for (int i
= 0; i
< len0
; i
++)
3983 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3984 for (int i
= 0; i
< len1
; i
++)
3985 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3986 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3987 gen_rtx_PARALLEL (VOIDmode
, vec
));
3996 if (mode
== GET_MODE (op0
)
3997 && mode
== GET_MODE (op1
)
3998 && vec_duplicate_p (op0
, &elt0
)
3999 && vec_duplicate_p (op1
, &elt1
))
/* Try applying the operator to ELT and see if that simplifies.
   We can duplicate the result if so.

   The reason we don't use simplify_gen_binary is that it isn't
   necessarily a win to convert things like:

     (plus:V (vec_duplicate:V (reg:S R1))
	     (vec_duplicate:V (reg:S R2)))

   to:

     (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))

   The first might be done entirely in vector registers while the
   second might need a move between register files.  */
4016 tem
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4019 return gen_vec_duplicate (mode
, tem
);
4026 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
4029 if (VECTOR_MODE_P (mode
)
4030 && code
!= VEC_CONCAT
4031 && GET_CODE (op0
) == CONST_VECTOR
4032 && GET_CODE (op1
) == CONST_VECTOR
)
4034 unsigned int n_elts
;
4035 if (!CONST_VECTOR_NUNITS (op0
).is_constant (&n_elts
))
4038 gcc_assert (known_eq (n_elts
, CONST_VECTOR_NUNITS (op1
)));
4039 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
4040 rtvec v
= rtvec_alloc (n_elts
);
4043 for (i
= 0; i
< n_elts
; i
++)
4045 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4046 CONST_VECTOR_ELT (op0
, i
),
4047 CONST_VECTOR_ELT (op1
, i
));
4048 if (!x
|| !valid_for_const_vector_p (mode
, x
))
4050 RTVEC_ELT (v
, i
) = x
;
4053 return gen_rtx_CONST_VECTOR (mode
, v
);
4056 if (VECTOR_MODE_P (mode
)
4057 && code
== VEC_CONCAT
4058 && (CONST_SCALAR_INT_P (op0
)
4059 || CONST_FIXED_P (op0
)
4060 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4061 && (CONST_SCALAR_INT_P (op1
)
4062 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4063 || CONST_FIXED_P (op1
)))
/* Both inputs have a constant number of elements, so the result
   must too.  */
4067 unsigned n_elts
= GET_MODE_NUNITS (mode
).to_constant ();
4068 rtvec v
= rtvec_alloc (n_elts
);
4070 gcc_assert (n_elts
>= 2);
4073 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4074 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4076 RTVEC_ELT (v
, 0) = op0
;
4077 RTVEC_ELT (v
, 1) = op1
;
4081 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
)).to_constant ();
4082 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
)).to_constant ();
4085 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4086 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4087 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4089 for (i
= 0; i
< op0_n_elts
; ++i
)
4090 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op0
, i
);
4091 for (i
= 0; i
< op1_n_elts
; ++i
)
4092 RTVEC_ELT (v
, op0_n_elts
+i
) = CONST_VECTOR_ELT (op1
, i
);
4095 return gen_rtx_CONST_VECTOR (mode
, v
);
4098 if (SCALAR_FLOAT_MODE_P (mode
)
4099 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4100 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4101 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4112 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4114 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4116 for (i
= 0; i
< 4; i
++)
4133 real_from_target (&r
, tmp0
, mode
);
4134 return const_double_from_real_value (r
, mode
);
4138 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4139 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4142 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4143 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4145 if (HONOR_SNANS (mode
)
4146 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4147 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4150 real_convert (&f0
, mode
, opr0
);
4151 real_convert (&f1
, mode
, opr1
);
4154 && real_equal (&f1
, &dconst0
)
4155 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4158 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4159 && flag_trapping_math
4160 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4162 int s0
= REAL_VALUE_NEGATIVE (f0
);
4163 int s1
= REAL_VALUE_NEGATIVE (f1
);
4168 /* Inf + -Inf = NaN plus exception. */
4173 /* Inf - Inf = NaN plus exception. */
4178 /* Inf / Inf = NaN plus exception. */
4185 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4186 && flag_trapping_math
4187 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4188 || (REAL_VALUE_ISINF (f1
)
4189 && real_equal (&f0
, &dconst0
))))
4190 /* Inf * 0 = NaN plus exception. */
4193 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4195 real_convert (&result
, mode
, &value
);
/* Don't constant fold this floating point operation if
   the result has overflowed and flag_trapping_math.  */
4200 if (flag_trapping_math
4201 && MODE_HAS_INFINITIES (mode
)
4202 && REAL_VALUE_ISINF (result
)
4203 && !REAL_VALUE_ISINF (f0
)
4204 && !REAL_VALUE_ISINF (f1
))
4205 /* Overflow plus exception. */
/* Don't constant fold this floating point operation if the
   result may depend upon the run-time rounding mode and
   flag_rounding_math is set, or if GCC's software emulation
   is unable to accurately represent the result.  */
4213 if ((flag_rounding_math
4214 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4215 && (inexact
|| !real_identical (&result
, &value
)))
4218 return const_double_from_real_value (result
, mode
);
4222 /* We can fold some multi-word operations. */
4223 scalar_int_mode int_mode
;
4224 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
4225 && CONST_SCALAR_INT_P (op0
)
4226 && CONST_SCALAR_INT_P (op1
))
4229 wi::overflow_type overflow
;
4230 rtx_mode_t pop0
= rtx_mode_t (op0
, int_mode
);
4231 rtx_mode_t pop1
= rtx_mode_t (op1
, int_mode
);
4233 #if TARGET_SUPPORTS_WIDE_INT == 0
/* This assert keeps the simplification from producing a result
   that cannot be represented in a CONST_DOUBLE, but a lot of
   upstream callers expect that this function never fails to
   simplify something, so if you added this to the test above
   the code would die later anyway.  If this assert happens,
   you just need to make the port support wide int.  */
gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4245 result
= wi::sub (pop0
, pop1
);
4249 result
= wi::add (pop0
, pop1
);
4253 result
= wi::mul (pop0
, pop1
);
4257 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4263 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4269 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4275 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4281 result
= wi::bit_and (pop0
, pop1
);
4285 result
= wi::bit_or (pop0
, pop1
);
4289 result
= wi::bit_xor (pop0
, pop1
);
4293 result
= wi::smin (pop0
, pop1
);
4297 result
= wi::smax (pop0
, pop1
);
4301 result
= wi::umin (pop0
, pop1
);
4305 result
= wi::umax (pop0
, pop1
);
4312 wide_int wop1
= pop1
;
4313 if (SHIFT_COUNT_TRUNCATED
)
4314 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
4315 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
4321 result
= wi::lrshift (pop0
, wop1
);
4325 result
= wi::arshift (pop0
, wop1
);
4329 result
= wi::lshift (pop0
, wop1
);
4340 if (wi::neg_p (pop1
))
4346 result
= wi::lrotate (pop0
, pop1
);
4350 result
= wi::rrotate (pop0
, pop1
);
4361 return immed_wide_int_const (result
, int_mode
);
4364 /* Handle polynomial integers. */
4365 if (NUM_POLY_INT_COEFFS
> 1
4366 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4367 && poly_int_rtx_p (op0
)
4368 && poly_int_rtx_p (op1
))
4370 poly_wide_int result
;
4374 result
= wi::to_poly_wide (op0
, mode
) + wi::to_poly_wide (op1
, mode
);
4378 result
= wi::to_poly_wide (op0
, mode
) - wi::to_poly_wide (op1
, mode
);
4382 if (CONST_SCALAR_INT_P (op1
))
4383 result
= wi::to_poly_wide (op0
, mode
) * rtx_mode_t (op1
, mode
);
4389 if (CONST_SCALAR_INT_P (op1
))
4391 wide_int shift
= rtx_mode_t (op1
, mode
);
4392 if (SHIFT_COUNT_TRUNCATED
)
4393 shift
= wi::umod_trunc (shift
, GET_MODE_PRECISION (int_mode
));
4394 else if (wi::geu_p (shift
, GET_MODE_PRECISION (int_mode
)))
4396 result
= wi::to_poly_wide (op0
, mode
) << shift
;
4403 if (!CONST_SCALAR_INT_P (op1
)
4404 || !can_ior_p (wi::to_poly_wide (op0
, mode
),
4405 rtx_mode_t (op1
, mode
), &result
))
4412 return immed_wide_int_const (result
, int_mode
);
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */
4424 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4428 result
= (commutative_operand_precedence (y
)
4429 - commutative_operand_precedence (x
));
4431 return result
+ result
;
4433 /* Group together equal REGs to do more simplification. */
4434 if (REG_P (x
) && REG_P (y
))
4435 return REGNO (x
) > REGNO (y
);
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */
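/* Sketch of the approach: (minus (plus A B) (plus C D)) is flattened into
   the working array as { A, + }, { B, + }, { C, - }, { D, - }; each pair of
   entries is then tried against the simplifier, and whatever survives is
   re-associated into a chain of PLUS/MINUS operations.  */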
4450 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4453 struct simplify_plus_minus_op_data
4460 int changed
, n_constants
, canonicalized
= 0;
4463 memset (ops
, 0, sizeof ops
);
/* Set up the two operands and then expand them until nothing has been
   changed.  If we run out of room in our array, give up; this should
   almost never happen.  */
4472 ops
[1].neg
= (code
== MINUS
);
4479 for (i
= 0; i
< n_ops
; i
++)
4481 rtx this_op
= ops
[i
].op
;
4482 int this_neg
= ops
[i
].neg
;
4483 enum rtx_code this_code
= GET_CODE (this_op
);
4489 if (n_ops
== ARRAY_SIZE (ops
))
4492 ops
[n_ops
].op
= XEXP (this_op
, 1);
4493 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4496 ops
[i
].op
= XEXP (this_op
, 0);
/* If this operand was negated then we will potentially
   canonicalize the expression.  Similarly if we don't
   place the operands adjacent we're re-ordering the
   expression and thus might be performing a
   canonicalization.  Ignore register re-ordering.
   ??? It might be better to shuffle the ops array here,
   but then (plus (plus (A, B), plus (C, D))) wouldn't
   be seen as non-canonical.  */
4508 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
4513 ops
[i
].op
= XEXP (this_op
, 0);
4514 ops
[i
].neg
= ! this_neg
;
4520 if (n_ops
!= ARRAY_SIZE (ops
)
4521 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4522 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4523 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4525 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4526 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4527 ops
[n_ops
].neg
= this_neg
;
4535 /* ~a -> (-a - 1) */
4536 if (n_ops
!= ARRAY_SIZE (ops
))
4538 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4539 ops
[n_ops
++].neg
= this_neg
;
4540 ops
[i
].op
= XEXP (this_op
, 0);
4541 ops
[i
].neg
= !this_neg
;
4551 ops
[i
].op
= neg_const_int (mode
, this_op
);
4565 if (n_constants
> 1)
4568 gcc_assert (n_ops
>= 2);
4570 /* If we only have two operands, we can avoid the loops. */
4573 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
/* Get the two operands.  Be careful with the order, especially for
   the cases where code == MINUS.  */
4578 if (ops
[0].neg
&& ops
[1].neg
)
4580 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4583 else if (ops
[0].neg
)
4594 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4597 /* Now simplify each pair of operands until nothing changes. */
4600 /* Insertion sort is good enough for a small array. */
4601 for (i
= 1; i
< n_ops
; i
++)
4603 struct simplify_plus_minus_op_data save
;
4607 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
4610 /* Just swapping registers doesn't count as canonicalization. */
4616 ops
[j
+ 1] = ops
[j
];
4618 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
4623 for (i
= n_ops
- 1; i
> 0; i
--)
4624 for (j
= i
- 1; j
>= 0; j
--)
4626 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4627 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4629 if (lhs
!= 0 && rhs
!= 0)
4631 enum rtx_code ncode
= PLUS
;
4637 std::swap (lhs
, rhs
);
4639 else if (swap_commutative_operands_p (lhs
, rhs
))
4640 std::swap (lhs
, rhs
);
4642 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4643 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4645 rtx tem_lhs
, tem_rhs
;
4647 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4648 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4649 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
4652 if (tem
&& !CONSTANT_P (tem
))
4653 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4656 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
/* Reject "simplifications" that just wrap the two
   arguments in a CONST.  Failure to do so can result
   in infinite recursion with simplify_binary_operation
   when it calls us to simplify CONST operations.
   Also, if we find such a simplification, don't try
   any more combinations with this rhs: we must have
   something like symbol+offset, i.e. one of the
   trivial CONST expressions we handle later.  */
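/* For instance, a "result" of
     (const (plus (symbol_ref X) (symbol_ref Y)))
   is no simpler than the operands we started with, so it is rejected
   here rather than fed back into the simplifier.  */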
4668 if (GET_CODE (tem
) == CONST
4669 && GET_CODE (XEXP (tem
, 0)) == ncode
4670 && XEXP (XEXP (tem
, 0), 0) == lhs
4671 && XEXP (XEXP (tem
, 0), 1) == rhs
)
4674 if (GET_CODE (tem
) == NEG
)
4675 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4676 if (CONST_INT_P (tem
) && lneg
)
4677 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4681 ops
[j
].op
= NULL_RTX
;
4691 /* Pack all the operands to the lower-numbered entries. */
4692 for (i
= 0, j
= 0; j
< n_ops
; j
++)
/* If nothing changed, check that rematerialization of rtl instructions
   is still required.  */
/* Perform rematerialization only if all operands are registers and
   all operations are PLUS.  */
/* ??? Also disallow (non-global, non-frame) fixed registers to work
   around rs6000 and how it uses the CA register.  See PR67145.  */
4709 for (i
= 0; i
< n_ops
; i
++)
4711 || !REG_P (ops
[i
].op
)
4712 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
4713 && fixed_regs
[REGNO (ops
[i
].op
)]
4714 && !global_regs
[REGNO (ops
[i
].op
)]
4715 && ops
[i
].op
!= frame_pointer_rtx
4716 && ops
[i
].op
!= arg_pointer_rtx
4717 && ops
[i
].op
!= stack_pointer_rtx
))
4722 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4724 && CONST_INT_P (ops
[1].op
)
4725 && CONSTANT_P (ops
[0].op
)
4727 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
/* We suppressed creation of trivial CONST expressions in the
   combination loop to avoid recursion.  Create one manually now.
   The combination loop should have ensured that there is exactly
   one CONST_INT, and the sort will have ensured that it is last
   in the array and that any other constant will be next-to-last.  */
4736 && CONST_INT_P (ops
[n_ops
- 1].op
)
4737 && CONSTANT_P (ops
[n_ops
- 2].op
))
4739 rtx value
= ops
[n_ops
- 1].op
;
4740 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4741 value
= neg_const_int (mode
, value
);
4742 if (CONST_INT_P (value
))
4744 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4750 /* Put a non-negated operand first, if possible. */
4752 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4755 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4764 /* Now make the result by performing the requested operations. */
4767 for (i
= 1; i
< n_ops
; i
++)
4768 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4769 mode
, result
, ops
[i
].op
);
4774 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4776 plus_minus_operand_p (const_rtx x
)
4778 return GET_CODE (x
) == PLUS
4779 || GET_CODE (x
) == MINUS
4780 || (GET_CODE (x
) == CONST
4781 && GET_CODE (XEXP (x
, 0)) == PLUS
4782 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4783 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands must
   not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
4795 simplify_relational_operation (enum rtx_code code
, machine_mode mode
,
4796 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4798 rtx tem
, trueop0
, trueop1
;
4800 if (cmp_mode
== VOIDmode
)
4801 cmp_mode
= GET_MODE (op0
);
4802 if (cmp_mode
== VOIDmode
)
4803 cmp_mode
= GET_MODE (op1
);
4805 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4808 if (SCALAR_FLOAT_MODE_P (mode
))
4810 if (tem
== const0_rtx
)
4811 return CONST0_RTX (mode
);
4812 #ifdef FLOAT_STORE_FLAG_VALUE
4814 REAL_VALUE_TYPE val
;
4815 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4816 return const_double_from_real_value (val
, mode
);
4822 if (VECTOR_MODE_P (mode
))
4824 if (tem
== const0_rtx
)
4825 return CONST0_RTX (mode
);
4826 #ifdef VECTOR_STORE_FLAG_VALUE
4828 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4829 if (val
== NULL_RTX
)
4831 if (val
== const1_rtx
)
4832 return CONST1_RTX (mode
);
4834 return gen_const_vec_duplicate (mode
, val
);
4844 /* For the following tests, ensure const0_rtx is op1. */
4845 if (swap_commutative_operands_p (op0
, op1
)
4846 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4847 std::swap (op0
, op1
), code
= swap_condition (code
);
4849 /* If op0 is a compare, extract the comparison arguments from it. */
4850 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4851 return simplify_gen_relational (code
, mode
, VOIDmode
,
4852 XEXP (op0
, 0), XEXP (op0
, 1));
4854 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4858 trueop0
= avoid_constant_pool_reference (op0
);
4859 trueop1
= avoid_constant_pool_reference (op1
);
4860 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
4871 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
4872 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4874 enum rtx_code op0code
= GET_CODE (op0
);
4876 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
/* If op0 is a comparison, extract the comparison arguments
   from it.  */
4882 if (GET_MODE (op0
) == mode
)
4883 return simplify_rtx (op0
);
4885 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4886 XEXP (op0
, 0), XEXP (op0
, 1));
4888 else if (code
== EQ
)
4890 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
4891 if (new_code
!= UNKNOWN
)
4892 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4893 XEXP (op0
, 0), XEXP (op0
, 1));
/* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
   (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
4899 if ((code
== LTU
|| code
== GEU
)
4900 && GET_CODE (op0
) == PLUS
4901 && CONST_INT_P (XEXP (op0
, 1))
4902 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4903 || rtx_equal_p (op1
, XEXP (op0
, 1)))
4904 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4905 && XEXP (op0
, 1) != const0_rtx
)
4908 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4909 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4910 cmp_mode
, XEXP (op0
, 0), new_cmp
);
/* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
   transformed into (LTU a -C).  */
4915 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
4916 && CONST_INT_P (XEXP (op0
, 1))
4917 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
4918 && XEXP (op0
, 1) != const0_rtx
)
4921 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4922 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
4923 XEXP (op0
, 0), new_cmp
);
4926 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4927 if ((code
== LTU
|| code
== GEU
)
4928 && GET_CODE (op0
) == PLUS
4929 && rtx_equal_p (op1
, XEXP (op0
, 1))
4930 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4931 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4932 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4933 copy_rtx (XEXP (op0
, 0)));
4935 if (op1
== const0_rtx
)
4937 /* Canonicalize (GTU x 0) as (NE x 0). */
4939 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4940 /* Canonicalize (LEU x 0) as (EQ x 0). */
4942 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4944 else if (op1
== const1_rtx
)
4949 /* Canonicalize (GE x 1) as (GT x 0). */
4950 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4953 /* Canonicalize (GEU x 1) as (NE x 0). */
4954 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4957 /* Canonicalize (LT x 1) as (LE x 0). */
4958 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4961 /* Canonicalize (LTU x 1) as (EQ x 0). */
4962 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4968 else if (op1
== constm1_rtx
)
4970 /* Canonicalize (LE x -1) as (LT x 0). */
4972 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4973 /* Canonicalize (GT x -1) as (GE x 0). */
4975 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4978 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4979 if ((code
== EQ
|| code
== NE
)
4980 && (op0code
== PLUS
|| op0code
== MINUS
)
4982 && CONSTANT_P (XEXP (op0
, 1))
4983 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4985 rtx x
= XEXP (op0
, 0);
4986 rtx c
= XEXP (op0
, 1);
4987 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4988 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
/* Detect an infinite recursive condition, where we oscillate at this
   simplification case between:
     A + B == C  <--->  C - B == A,
   where A, B, and C are all constants with non-simplifiable expressions,
   usually SYMBOL_REFs.  */
4995 if (GET_CODE (tem
) == invcode
4997 && rtx_equal_p (c
, XEXP (tem
, 1)))
5000 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
/* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
   the same as (zero_extract:SI FOO (const_int 1) BAR).  */
5005 scalar_int_mode int_mode
, int_cmp_mode
;
5007 && op1
== const0_rtx
5008 && is_int_mode (mode
, &int_mode
)
5009 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
5010 /* ??? Work-around BImode bugs in the ia64 backend. */
5011 && int_mode
!= BImode
5012 && int_cmp_mode
!= BImode
5013 && nonzero_bits (op0
, int_cmp_mode
) == 1
5014 && STORE_FLAG_VALUE
== 1)
5015 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
5016 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
5017 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
5019 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5020 if ((code
== EQ
|| code
== NE
)
5021 && op1
== const0_rtx
5023 return simplify_gen_relational (code
, mode
, cmp_mode
,
5024 XEXP (op0
, 0), XEXP (op0
, 1));
5026 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5027 if ((code
== EQ
|| code
== NE
)
5029 && rtx_equal_p (XEXP (op0
, 0), op1
)
5030 && !side_effects_p (XEXP (op0
, 0)))
5031 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
5034 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5035 if ((code
== EQ
|| code
== NE
)
5037 && rtx_equal_p (XEXP (op0
, 1), op1
)
5038 && !side_effects_p (XEXP (op0
, 1)))
5039 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5042 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5043 if ((code
== EQ
|| code
== NE
)
5045 && CONST_SCALAR_INT_P (op1
)
5046 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
5047 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5048 simplify_gen_binary (XOR
, cmp_mode
,
5049 XEXP (op0
, 1), op1
));
/* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction
   or constant folding if x/y is a constant.  */
5053 if ((code
== EQ
|| code
== NE
)
5054 && (op0code
== AND
|| op0code
== IOR
)
5055 && !side_effects_p (op1
)
5056 && op1
!= CONST0_RTX (cmp_mode
))
/* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
   (eq/ne (and (not y) x) 0).  */
5060 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 0), op1
))
5061 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 1), op1
)))
5063 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1),
5065 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
5067 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5068 CONST0_RTX (cmp_mode
));
/* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
   (eq/ne (and (not x) y) 0).  */
5073 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 1), op1
))
5074 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 0), op1
)))
5076 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0),
5078 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
5080 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5081 CONST0_RTX (cmp_mode
));
5085 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5086 if ((code
== EQ
|| code
== NE
)
5087 && GET_CODE (op0
) == BSWAP
5088 && CONST_SCALAR_INT_P (op1
))
5089 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5090 simplify_gen_unary (BSWAP
, cmp_mode
,
5093 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5094 if ((code
== EQ
|| code
== NE
)
5095 && GET_CODE (op0
) == BSWAP
5096 && GET_CODE (op1
) == BSWAP
)
5097 return simplify_gen_relational (code
, mode
, cmp_mode
,
5098 XEXP (op0
, 0), XEXP (op1
, 0));
5100 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
5106 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5107 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
5108 XEXP (op0
, 0), const0_rtx
);
5113 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5114 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
5115 XEXP (op0
, 0), const0_rtx
);
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */
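/* E.g. if the operands are known to compare equal, KNOWN_RESULT is CMP_EQ,
   so EQ, LE and GE fold to const_true_rtx while NE, LT and GT fold to
   const0_rtx.  */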
5141 comparison_result (enum rtx_code code
, int known_results
)
5147 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
5150 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
5154 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
5157 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
5161 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
5164 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
5167 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
5169 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
5172 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
5174 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
5177 return const_true_rtx
;
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */
5192 simplify_const_relational_operation (enum rtx_code code
,
5200 gcc_assert (mode
!= VOIDmode
5201 || (GET_MODE (op0
) == VOIDmode
5202 && GET_MODE (op1
) == VOIDmode
));
5204 /* If op0 is a compare, extract the comparison arguments from it. */
5205 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5207 op1
= XEXP (op0
, 1);
5208 op0
= XEXP (op0
, 0);
5210 if (GET_MODE (op0
) != VOIDmode
)
5211 mode
= GET_MODE (op0
);
5212 else if (GET_MODE (op1
) != VOIDmode
)
5213 mode
= GET_MODE (op1
);
/* We can't simplify MODE_CC values since we don't know what the
   actual comparison is.  */
5220 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
5223 /* Make sure the constant is second. */
5224 if (swap_commutative_operands_p (op0
, op1
))
5226 std::swap (op0
, op1
);
5227 code
= swap_condition (code
);
5230 trueop0
= avoid_constant_pool_reference (op0
);
5231 trueop1
= avoid_constant_pool_reference (op1
);
/* For integer comparisons of A and B maybe we can simplify A - B and can
   then simplify a comparison of that with zero.  If A and B are both either
   a register or a CONST_INT, this can't help; testing for these cases will
   prevent infinite recursion here and speed things up.

   We can only do this for EQ and NE comparisons as otherwise we may
   lose or introduce overflow which we cannot disregard as undefined as
   we do not know the signedness of the operation on either the left or
   the right hand side of the comparison.  */
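/* For instance, in (eq (plus x 2) (plus x 3)) the difference folds to
   (const_int -1), and the resulting comparison against zero yields
   const0_rtx.  */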
5243 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5244 && (code
== EQ
|| code
== NE
)
5245 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5246 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5247 && (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
)) != 0
5248 /* We cannot do this if tem is a nonzero address. */
5249 && ! nonzero_address_p (tem
))
5250 return simplify_const_relational_operation (signed_condition (code
),
5251 mode
, tem
, const0_rtx
);
5253 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5254 return const_true_rtx
;
5256 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
/* For modes without NaNs, if the two operands are equal, we know the
   result except if they have side-effects.  Even with NaNs we know
   the result of unordered comparisons and, if signaling NaNs are
   irrelevant, also the result of LT/GT/LTGT.  */
5263 if ((! HONOR_NANS (trueop0
)
5264 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5265 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5266 && ! HONOR_SNANS (trueop0
)))
5267 && rtx_equal_p (trueop0
, trueop1
)
5268 && ! side_effects_p (trueop0
))
5269 return comparison_result (code
, CMP_EQ
);
/* If the operands are floating-point constants, see if we can fold
   the result.  */
5273 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5274 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5275 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5277 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5278 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5280 /* Comparisons are unordered iff at least one of the values is NaN. */
5281 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
5291 return const_true_rtx
;
5304 return comparison_result (code
,
5305 (real_equal (d0
, d1
) ? CMP_EQ
:
5306 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
5309 /* Otherwise, see if the operands are both integers. */
5310 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
5311 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
/* It would be nice if we really had a mode here.  However, the
   largest int representable on the target is as good as
   infinite.  */
5316 machine_mode cmode
= (mode
== VOIDmode
) ? MAX_MODE_INT
: mode
;
5317 rtx_mode_t ptrueop0
= rtx_mode_t (trueop0
, cmode
);
5318 rtx_mode_t ptrueop1
= rtx_mode_t (trueop1
, cmode
);
5320 if (wi::eq_p (ptrueop0
, ptrueop1
))
5321 return comparison_result (code
, CMP_EQ
);
5324 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
5325 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
5326 return comparison_result (code
, cr
);
5330 /* Optimize comparisons with upper and lower bounds. */
5331 scalar_int_mode int_mode
;
5332 if (CONST_INT_P (trueop1
)
5333 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5334 && HWI_COMPUTABLE_MODE_P (int_mode
)
5335 && !side_effects_p (trueop0
))
5338 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
5339 HOST_WIDE_INT val
= INTVAL (trueop1
);
5340 HOST_WIDE_INT mmin
, mmax
;
5350 /* Get a reduced range if the sign bit is zero. */
5351 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
5358 rtx mmin_rtx
, mmax_rtx
;
5359 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
5361 mmin
= INTVAL (mmin_rtx
);
5362 mmax
= INTVAL (mmax_rtx
);
5365 unsigned int sign_copies
5366 = num_sign_bit_copies (trueop0
, int_mode
);
5368 mmin
>>= (sign_copies
- 1);
5369 mmax
>>= (sign_copies
- 1);
5375 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5377 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5378 return const_true_rtx
;
5379 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5384 return const_true_rtx
;
5389 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5391 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5392 return const_true_rtx
;
5393 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5398 return const_true_rtx
;
5404 /* x == y is always false for y out of range. */
5405 if (val
< mmin
|| val
> mmax
)
5409 /* x > y is always false for y >= mmax, always true for y < mmin. */
5411 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5413 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5414 return const_true_rtx
;
5420 return const_true_rtx
;
5423 /* x < y is always false for y <= mmin, always true for y > mmax. */
5425 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5427 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5428 return const_true_rtx
;
5434 return const_true_rtx
;
5438 /* x != y is always true for y out of range. */
5439 if (val
< mmin
|| val
> mmax
)
5440 return const_true_rtx
;
5448 /* Optimize integer comparisons with zero. */
5449 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
5450 && trueop1
== const0_rtx
5451 && !side_effects_p (trueop0
))
/* Some addresses are known to be nonzero.  We don't know
   their sign, but equality comparisons are known.  */
5455 if (nonzero_address_p (trueop0
))
5457 if (code
== EQ
|| code
== LEU
)
5459 if (code
== NE
|| code
== GTU
)
5460 return const_true_rtx
;
/* See if the first operand is an IOR with a constant.  If so, we
   may be able to determine the result of this comparison.  */
5465 if (GET_CODE (op0
) == IOR
)
5467 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5468 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5470 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
5471 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5472 && (UINTVAL (inner_const
)
5483 return const_true_rtx
;
5487 return const_true_rtx
;
5501 /* Optimize comparison of ABS with zero. */
5502 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
5503 && (GET_CODE (trueop0
) == ABS
5504 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5505 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5510 /* Optimize abs(x) < 0.0. */
5511 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
5516 /* Optimize abs(x) >= 0.0. */
5517 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
5518 return const_true_rtx
;
5522 /* Optimize ! (abs(x) < 0.0). */
5523 return const_true_rtx
;
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the
   expression can be simplified to that or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */
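/* E.g. on a hypothetical target where CLZ_DEFINED_VALUE_AT_ZERO stores 32
   for SImode, (eq x 0) ? (const_int 32) : (clz:SI x) collapses to
   (clz:SI x); the analogous CTZ case is handled the same way.  */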
5541 simplify_cond_clz_ctz (rtx x
, rtx_code cmp_code
, rtx true_val
, rtx false_val
)
5543 if (cmp_code
!= EQ
&& cmp_code
!= NE
)
5546 /* Result on X == 0 and X !=0 respectively. */
5547 rtx on_zero
, on_nonzero
;
5551 on_nonzero
= false_val
;
5555 on_zero
= false_val
;
5556 on_nonzero
= true_val
;
5559 rtx_code op_code
= GET_CODE (on_nonzero
);
5560 if ((op_code
!= CLZ
&& op_code
!= CTZ
)
5561 || !rtx_equal_p (XEXP (on_nonzero
, 0), x
)
5562 || !CONST_INT_P (on_zero
))
5565 HOST_WIDE_INT op_val
;
5566 scalar_int_mode mode ATTRIBUTE_UNUSED
5567 = as_a
<scalar_int_mode
> (GET_MODE (XEXP (on_nonzero
, 0)));
5568 if (((op_code
== CLZ
&& CLZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
))
5569 || (op_code
== CTZ
&& CTZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
)))
5570 && op_val
== INTVAL (on_zero
))
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
5582 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5583 machine_mode op0_mode
, rtx op0
, rtx op1
,
5586 bool any_change
= false;
5588 scalar_int_mode int_mode
, int_op0_mode
;
5589 unsigned int n_elts
;
5594 /* Simplify negations around the multiplication. */
5595 /* -a * -b + c => a * b + c. */
5596 if (GET_CODE (op0
) == NEG
)
5598 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5600 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5602 else if (GET_CODE (op1
) == NEG
)
5604 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5606 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5609 /* Canonicalize the two multiplication operands. */
5610 /* a * -b + c => -b * a + c. */
5611 if (swap_commutative_operands_p (op0
, op1
))
5612 std::swap (op0
, op1
), any_change
= true;
5615 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5620 if (CONST_INT_P (op0
)
5621 && CONST_INT_P (op1
)
5622 && CONST_INT_P (op2
)
5623 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5624 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
5625 && HWI_COMPUTABLE_MODE_P (int_mode
))
5627 /* Extracting a bit-field from a constant */
5628 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5629 HOST_WIDE_INT op1val
= INTVAL (op1
);
5630 HOST_WIDE_INT op2val
= INTVAL (op2
);
5631 if (!BITS_BIG_ENDIAN
)
5633 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
5634 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
5636 /* Not enough information to calculate the bit position. */
5639 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5641 /* First zero-extend. */
5642 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
5643 /* If desired, propagate sign bit. */
5644 if (code
== SIGN_EXTRACT
5645 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
5647 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
5650 return gen_int_mode (val
, int_mode
);
5655 if (CONST_INT_P (op0
))
5656 return op0
!= const0_rtx
? op1
: op2
;
5658 /* Convert c ? a : a into "a". */
5659 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5662 /* Convert a != b ? a : b into "a". */
5663 if (GET_CODE (op0
) == NE
5664 && ! side_effects_p (op0
)
5665 && ! HONOR_NANS (mode
)
5666 && ! HONOR_SIGNED_ZEROS (mode
)
5667 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5668 && rtx_equal_p (XEXP (op0
, 1), op2
))
5669 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5670 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5673 /* Convert a == b ? a : b into "b". */
5674 if (GET_CODE (op0
) == EQ
5675 && ! side_effects_p (op0
)
5676 && ! HONOR_NANS (mode
)
5677 && ! HONOR_SIGNED_ZEROS (mode
)
5678 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5679 && rtx_equal_p (XEXP (op0
, 1), op2
))
5680 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5681 && rtx_equal_p (XEXP (op0
, 1), op1
))))
/* Convert (!c) != {0,...,0} ? a : b into
   c != {0,...,0} ? b : a for vector modes.  */
5686 if (VECTOR_MODE_P (GET_MODE (op1
))
5687 && GET_CODE (op0
) == NE
5688 && GET_CODE (XEXP (op0
, 0)) == NOT
5689 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
5691 rtx cv
= XEXP (op0
, 1);
5694 if (!CONST_VECTOR_NUNITS (cv
).is_constant (&nunits
))
5697 for (int i
= 0; i
< nunits
; ++i
)
5698 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
5705 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
5706 XEXP (XEXP (op0
, 0), 0),
5708 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
/* Convert x == 0 ? N : clz (x) into clz (x) when
   CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
   Similarly for ctz (x).  */
5716 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
5717 && XEXP (op0
, 1) == const0_rtx
)
5720 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
5726 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5728 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5729 ? GET_MODE (XEXP (op0
, 1))
5730 : GET_MODE (XEXP (op0
, 0)));
5733 /* Look for happy constants in op1 and op2. */
5734 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5736 HOST_WIDE_INT t
= INTVAL (op1
);
5737 HOST_WIDE_INT f
= INTVAL (op2
);
5739 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5740 code
= GET_CODE (op0
);
5741 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5744 tmp
= reversed_comparison_code (op0
, NULL
);
5752 return simplify_gen_relational (code
, mode
, cmp_mode
,
5753 XEXP (op0
, 0), XEXP (op0
, 1));
5756 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5757 cmp_mode
, XEXP (op0
, 0),
5760 /* See if any simplifications were possible. */
5763 if (CONST_INT_P (temp
))
5764 return temp
== const0_rtx
? op2
: op1
;
5766 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5772 gcc_assert (GET_MODE (op0
) == mode
);
5773 gcc_assert (GET_MODE (op1
) == mode
);
5774 gcc_assert (VECTOR_MODE_P (mode
));
5775 trueop2
= avoid_constant_pool_reference (op2
);
5776 if (CONST_INT_P (trueop2
)
5777 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
5779 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
5780 unsigned HOST_WIDE_INT mask
;
5781 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
5784 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
5786 if (!(sel
& mask
) && !side_effects_p (op0
))
5788 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
5791 rtx trueop0
= avoid_constant_pool_reference (op0
);
5792 rtx trueop1
= avoid_constant_pool_reference (op1
);
5793 if (GET_CODE (trueop0
) == CONST_VECTOR
5794 && GET_CODE (trueop1
) == CONST_VECTOR
)
5796 rtvec v
= rtvec_alloc (n_elts
);
5799 for (i
= 0; i
< n_elts
; i
++)
5800 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
5801 ? CONST_VECTOR_ELT (trueop0
, i
)
5802 : CONST_VECTOR_ELT (trueop1
, i
));
5803 return gen_rtx_CONST_VECTOR (mode
, v
);
/* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
   if no element from a appears in the result.  */
5808 if (GET_CODE (op0
) == VEC_MERGE
)
5810 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
5811 if (CONST_INT_P (tem
))
5813 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
5814 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
5815 return simplify_gen_ternary (code
, mode
, mode
,
5816 XEXP (op0
, 1), op1
, op2
);
5817 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
5818 return simplify_gen_ternary (code
, mode
, mode
,
5819 XEXP (op0
, 0), op1
, op2
);
5822 if (GET_CODE (op1
) == VEC_MERGE
)
5824 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
5825 if (CONST_INT_P (tem
))
5827 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
5828 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
5829 return simplify_gen_ternary (code
, mode
, mode
,
5830 op0
, XEXP (op1
, 1), op2
);
5831 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
5832 return simplify_gen_ternary (code
, mode
, mode
,
5833 op0
, XEXP (op1
, 0), op2
);
/* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
   with a.  */
5839 if (GET_CODE (op0
) == VEC_DUPLICATE
5840 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
5841 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
5842 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0
, 0))), 1))
5844 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
5845 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
5847 if (XEXP (XEXP (op0
, 0), 0) == op1
5848 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
/* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
	     (const_int N))
   with (vec_concat (X) (B)) if N == 1 or
   (vec_concat (A) (X)) if N == 2.  */
5856 if (GET_CODE (op0
) == VEC_DUPLICATE
5857 && GET_CODE (op1
) == CONST_VECTOR
5858 && known_eq (CONST_VECTOR_NUNITS (op1
), 2)
5859 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
5860 && IN_RANGE (sel
, 1, 2))
5862 rtx newop0
= XEXP (op0
, 0);
5863 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
5865 std::swap (newop0
, newop1
);
5866 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
/* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
   with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
   Only applies for vectors of two elements.  */
5871 if (GET_CODE (op0
) == VEC_DUPLICATE
5872 && GET_CODE (op1
) == VEC_CONCAT
5873 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
5874 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
5875 && IN_RANGE (sel
, 1, 2))
5877 rtx newop0
= XEXP (op0
, 0);
5878 rtx newop1
= XEXP (op1
, 2 - sel
);
5879 rtx otherop
= XEXP (op1
, sel
- 1);
5881 std::swap (newop0
, newop1
);
/* Don't want to throw away the other part of the vec_concat if
   it has side-effects.  */
5884 if (!side_effects_p (otherop
))
5885 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
/* Replace:

     (vec_merge:outer (vec_duplicate:outer x:inner)
		      (subreg:outer y:inner 0)
		      (const_int N))

   with (vec_concat:outer x:inner y:inner) if N == 1,
   or (vec_concat:outer y:inner x:inner) if N == 2.

   Implicitly, this means we have a paradoxical subreg, but such
   a check is cheap, so make it anyway.

   Only applies for vectors of two elements.  */
5901 if (GET_CODE (op0
) == VEC_DUPLICATE
5902 && GET_CODE (op1
) == SUBREG
5903 && GET_MODE (op1
) == GET_MODE (op0
)
5904 && GET_MODE (SUBREG_REG (op1
)) == GET_MODE (XEXP (op0
, 0))
5905 && paradoxical_subreg_p (op1
)
5906 && subreg_lowpart_p (op1
)
5907 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
5908 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
5909 && IN_RANGE (sel
, 1, 2))
5911 rtx newop0
= XEXP (op0
, 0);
5912 rtx newop1
= SUBREG_REG (op1
);
5914 std::swap (newop0
, newop1
);
5915 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
/* Same as above but with switched operands:
   Replace (vec_merge:outer (subreg:outer x:inner 0)
			    (vec_duplicate:outer y:inner)
			    (const_int N))
   with (vec_concat:outer x:inner y:inner) if N == 1,
   or (vec_concat:outer y:inner x:inner) if N == 2.  */
5925 if (GET_CODE (op1
) == VEC_DUPLICATE
5926 && GET_CODE (op0
) == SUBREG
5927 && GET_MODE (op0
) == GET_MODE (op1
)
5928 && GET_MODE (SUBREG_REG (op0
)) == GET_MODE (XEXP (op1
, 0))
5929 && paradoxical_subreg_p (op0
)
5930 && subreg_lowpart_p (op0
)
5931 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
5932 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
5933 && IN_RANGE (sel
, 1, 2))
5935 rtx newop0
= SUBREG_REG (op0
);
5936 rtx newop1
= XEXP (op1
, 0);
5938 std::swap (newop0
, newop1
);
5939 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
/* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
			(const_int N))
   with (vec_concat x y) or (vec_concat y x) depending on value
   of N.  */
5946 if (GET_CODE (op0
) == VEC_DUPLICATE
5947 && GET_CODE (op1
) == VEC_DUPLICATE
5948 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
5949 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
5950 && IN_RANGE (sel
, 1, 2))
5952 rtx newop0
= XEXP (op0
, 0);
5953 rtx newop1
= XEXP (op1
, 0);
5955 std::swap (newop0
, newop1
);
5957 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
5961 if (rtx_equal_p (op0
, op1
)
5962 && !side_effects_p (op2
) && !side_effects_p (op1
))
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit
   values represented as a little-endian array of 'unsigned char',
   selecting by BYTE, and then repacking them again for OUTERMODE.  If OP
   is a CONST_VECTOR, FIRST_ELEM is the number of the first element to
   extract, otherwise FIRST_ELEM is ignored.  */
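/* Illustrative sketch: taking the low SImode half of a DImode constant
   unpacks its eight constant bytes into the little-endian value array,
   selects the four bytes indicated by BYTE (after the endianness
   adjustment below), and repacks them as a CONST_INT in SImode.  */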
5985 simplify_immed_subreg (fixed_size_mode outermode
, rtx op
,
5986 machine_mode innermode
, unsigned int byte
,
5987 unsigned int first_elem
, unsigned int inner_bytes
)
5991 value_mask
= (1 << value_bit
) - 1
5993 unsigned char value
[MAX_BITSIZE_MODE_ANY_MODE
/ value_bit
];
6001 rtx result_s
= NULL
;
6002 rtvec result_v
= NULL
;
6003 enum mode_class outer_class
;
6004 scalar_mode outer_submode
;
6007 /* Some ports misuse CCmode. */
6008 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
6011 /* We have no way to represent a complex constant at the rtl level. */
6012 if (COMPLEX_MODE_P (outermode
))
6015 /* We support any size mode. */
6016 max_bitsize
= MAX (GET_MODE_BITSIZE (outermode
),
6017 inner_bytes
* BITS_PER_UNIT
);
6019 /* Unpack the value. */
6021 if (GET_CODE (op
) == CONST_VECTOR
)
6023 num_elem
= CEIL (inner_bytes
, GET_MODE_UNIT_SIZE (innermode
));
6024 elem_bitsize
= GET_MODE_UNIT_BITSIZE (innermode
);
6029 elem_bitsize
= max_bitsize
;
/* If this asserts, it is too complicated; reducing value_bit may help.  */
gcc_assert (BITS_PER_UNIT % value_bit == 0);
/* I don't know how to handle endianness of sub-units.  */
gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
6036 for (elem
= 0; elem
< num_elem
; elem
++)
6039 rtx el
= (GET_CODE (op
) == CONST_VECTOR
6040 ? CONST_VECTOR_ELT (op
, first_elem
+ elem
)
/* Vectors are kept in target memory order.  (This is probably
   a mistake.)  */
6046 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
6047 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
6049 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6050 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6051 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
6052 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
6053 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
6056 switch (GET_CODE (el
))
6060 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
6062 *vp
++ = INTVAL (el
) >> i
;
6063 /* CONST_INTs are always logically sign-extended. */
6064 for (; i
< elem_bitsize
; i
+= value_bit
)
6065 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
6068 case CONST_WIDE_INT
:
6070 rtx_mode_t val
= rtx_mode_t (el
, GET_MODE_INNER (innermode
));
6071 unsigned char extend
= wi::sign_mask (val
);
6072 int prec
= wi::get_precision (val
);
6074 for (i
= 0; i
< prec
&& i
< elem_bitsize
; i
+= value_bit
)
6075 *vp
++ = wi::extract_uhwi (val
, i
, value_bit
);
6076 for (; i
< elem_bitsize
; i
+= value_bit
)
6082 if (TARGET_SUPPORTS_WIDE_INT
== 0 && GET_MODE (el
) == VOIDmode
)
6084 unsigned char extend
= 0;
/* If this triggers, someone should have generated a
   CONST_INT instead.  */
6087 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
6089 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
6090 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
6091 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
6094 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
6098 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
6100 for (; i
< elem_bitsize
; i
+= value_bit
)
6105 /* This is big enough for anything on the platform. */
6106 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
6107 scalar_float_mode el_mode
;
6109 el_mode
= as_a
<scalar_float_mode
> (GET_MODE (el
));
6110 int bitsize
= GET_MODE_BITSIZE (el_mode
);
6112 gcc_assert (bitsize
<= elem_bitsize
);
6113 gcc_assert (bitsize
% value_bit
== 0);
6115 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
/* real_to_target produces its result in words affected by
   FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
   and use WORDS_BIG_ENDIAN instead; see the documentation
   of SUBREG in rtl.texi.  */
6122 for (i
= 0; i
< bitsize
; i
+= value_bit
)
6125 if (WORDS_BIG_ENDIAN
)
6126 ibase
= bitsize
- 1 - i
;
6129 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
/* It shouldn't matter what's done here, so fill it with
   zero.  */
6134 for (; i
< elem_bitsize
; i
+= value_bit
)
6140 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
6142 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
6143 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
6147 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
6148 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
6149 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
6151 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
6152 >> (i
- HOST_BITS_PER_WIDE_INT
);
6153 for (; i
< elem_bitsize
; i
+= value_bit
)
/* Now, pick the right byte to start with.  */
/* Renumber BYTE so that the least-significant byte is byte 0.  A special
   case is paradoxical SUBREGs, which shouldn't be adjusted since they
   will already have offset 0.  */
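/* Worked example (assuming an 8-byte inner value, a 4-byte OUTERMODE,
   BYTE == 0, and a target where both WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN
   are set): the subreg then refers to the most-significant half, so BYTE
   is re-mapped to little-endian index 4 before indexing the value array.  */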
6167 if (inner_bytes
>= GET_MODE_SIZE (outermode
))
6169 unsigned ibyte
= inner_bytes
- GET_MODE_SIZE (outermode
) - byte
;
6170 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6171 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6172 byte
= (subword_byte
% UNITS_PER_WORD
6173 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
/* BYTE should still be inside OP.  (Note that BYTE is unsigned,
   so if it's become negative it will instead be very large.)  */
6178 gcc_assert (byte
< inner_bytes
);
6180 /* Convert from bytes to chunks of size value_bit. */
6181 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
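  /* Illustrative example (added for clarity, not part of the original
     code): extracting the low SImode half of a DImode constant.  On a
     little-endian target the lowpart offset is byte 0, which the formula
     leaves at 0.  On a target with both WORDS_BIG_ENDIAN and
     BYTES_BIG_ENDIAN set the lowpart offset is byte 4; then
     ibyte == 8 - 4 - 4 == 0, so BYTE is renumbered to 0 and VALUE_START
     selects the least significant chunks either way.  */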
  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
		/ HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;
	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT) (*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, poly_uint64 byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);
  poly_uint64 outersize = GET_MODE_SIZE (outermode);
  if (!multiple_p (byte, outersize))
    return NULL_RTX;

  poly_uint64 innersize = GET_MODE_SIZE (innermode);
  if (maybe_ge (byte, innersize))
    return NULL_RTX;

  if (outermode == innermode && known_eq (byte, 0U))
    return op;

  if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
    {
      rtx elt;

      if (VECTOR_MODE_P (outermode)
	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return gen_vec_duplicate (outermode, elt);

      if (outermode == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return elt;
    }

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || CONST_FIXED_P (op)
      || GET_CODE (op) == CONST_VECTOR)
    {
      /* simplify_immed_subreg deconstructs OP into bytes and constructs
	 the result from bytes, so it only works if the sizes of the modes
	 and the value of the offset are known at compile time.  Cases
	 that apply to general modes and offsets should be handled here
	 before calling simplify_immed_subreg.  */
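      /* For example (illustration only, not from the original source):
	 when INNERMODE is a variable-length vector mode, GET_MODE_SIZE
	 (innermode) is not a compile-time constant, so only the
	 CONST_VECTOR path below -- fixed-size OUTERMODE and an
	 element-aligned BYTE -- can reach simplify_immed_subreg.  */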
      fixed_size_mode fs_outermode, fs_innermode;
      unsigned HOST_WIDE_INT cbyte;
      if (is_a <fixed_size_mode> (outermode, &fs_outermode)
	  && is_a <fixed_size_mode> (innermode, &fs_innermode)
	  && byte.is_constant (&cbyte))
	return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte,
				      0, GET_MODE_SIZE (fs_innermode));

      /* Handle constant-sized outer modes and variable-sized inner modes.  */
      unsigned HOST_WIDE_INT first_elem;
      if (GET_CODE (op) == CONST_VECTOR
	  && is_a <fixed_size_mode> (outermode, &fs_outermode)
	  && constant_multiple_p (byte, GET_MODE_UNIT_SIZE (innermode),
				  &first_elem))
	return simplify_immed_subreg (fs_outermode, op, innermode, 0,
				      first_elem,
				      GET_MODE_SIZE (fs_outermode));

      return NULL_RTX;
    }
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
      rtx newx;

      if (outermode == innermostmode
	  && known_eq (byte, 0U)
	  && known_eq (SUBREG_BYTE (op), 0))
	return SUBREG_REG (op);
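      /* Illustrative examples (added for clarity, not from the original
	 source), assuming R is a pseudo register and a little-endian
	 target: (subreg:SI (subreg:HI (reg:SI R) 0) 0) collapses to
	 (reg:SI R) via the early return above, while
	 (subreg:QI (subreg:HI (reg:SI R) 0) 0) is rewritten below as a
	 single (subreg:QI (reg:SI R) 0).  */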
      /* Work out the memory offset of the final OUTERMODE value relative
	 to the inner value of OP.  */
      poly_int64 mem_offset = subreg_memory_offset (outermode,
						    innermode, byte);
      poly_int64 op_mem_offset = subreg_memory_offset (op);
      poly_int64 final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
	{
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (maybe_lt (final_offset, 0)
	      || maybe_ge (poly_uint64 (final_offset), innermostsize)
	      || !multiple_p (final_offset, outersize))
	    return NULL_RTX;
	}
      else
	{
	  poly_int64 required_offset = subreg_memory_offset (outermode,
							     innermostmode, 0);
	  if (maybe_ne (final_offset, required_offset))
	    return NULL_RTX;
	  /* Paradoxical subregs always have byte offset 0.  */
	  final_offset = 0;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && known_ge (outersize, innersize)
	      && known_le (outersize, innermostsize)
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
				      subreg_memory_offset (outermode,
							    innermode, byte));

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis,
	     which cannot grok partial registers anyway.  */

	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && known_le (outersize, innersize))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      poly_uint64 final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      poly_uint64 part_size = GET_MODE_SIZE (part_mode);
      if (known_lt (byte, part_size))
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else if (known_ge (byte, part_size))
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}
      else
	return NULL_RTX;

      if (maybe_gt (final_offset + outersize, part_size))
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
	return CONST0_RTX (outermode);
    }
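  /* Illustrative example (added for clarity, not from the original
     source): on a little-endian target,
     (subreg:SI (zero_extend:DI (reg:SI R)) 4) selects bits [32, 64) of
     the zero-extended value.  Those bits lie entirely beyond the 32-bit
     precision of the ZERO_EXTEND source, so the expression folds to
     (const_int 0).  */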
  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
    {
      /* Handle polynomial integers.  The upper bits of a paradoxical
	 subreg are undefined, so this is safe regardless of whether
	 we're truncating or extending.  */
      if (CONST_POLY_INT_P (op))
	{
	  poly_wide_int val
	    = poly_wide_int::from (const_poly_int_value (op),
				   GET_MODE_PRECISION (int_outermode),
				   SIGNED);
	  return immed_wide_int_const (val, int_outermode);
	}

      if (GET_MODE_PRECISION (int_outermode)
	  < GET_MODE_PRECISION (int_innermode))
	{
	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
	  if (tem)
	    return tem;
	}
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, poly_uint64 byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
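/* Illustrative use (added for clarity, not from the original source):
   for a pseudo register R of mode SImode, lowpart_subreg (QImode, R,
   SImode) yields (subreg:QI (reg:SI R) 0) on a little-endian target and
   (subreg:QI (reg:SI R) 3) on a big-endian one, since
   subreg_lowpart_offset picks the byte offset of the least significant
   part.  */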
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   can be made.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}
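/* Illustrative use (added for clarity, not from the original source):
   calling simplify_rtx on (plus:SI (reg:SI R) (const_int 0)) dispatches
   to simplify_binary_operation, which folds the addition of zero and
   returns (reg:SI R); an expression with no applicable rule simply
   returns NULL.  */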
#if CHECKING_P

namespace selftest {

/* Make a unique pseudo REG of mode MODE for use by selftests.  */

static rtx
make_test_reg (machine_mode mode)
{
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}
/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  poly_uint64 nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NOT, mode,
					       duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NEG, mode,
					       duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (PLUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (MINUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
			 simplify_binary_operation (MINUS, mode, duplicate,
						    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
		     simplify_binary_operation (VEC_SELECT, inner_mode,
						duplicate, zero_par));

  /* And again with the final element.  */
  unsigned HOST_WIDE_INT const_nunits;
  if (nunits.is_constant (&const_nunits))
    {
      rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
      rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
      ASSERT_RTX_PTR_EQ (scalar_reg,
			 simplify_binary_operation (VEC_SELECT, inner_mode,
						    duplicate, last_par));
    }

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
		 simplify_gen_subreg (inner_mode, duplicate,
				      mode, offset));

  machine_mode narrower_mode;
  if (maybe_ne (nunits, 2U)
      && multiple_p (nunits, 2)
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_binary_operation (VEC_SELECT, narrower_mode,
						duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_gen_subreg (narrower_mode, duplicate,
					  mode, offset));
    }
}
/* Test vector simplifications involving VEC_SERIES in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_series (machine_mode mode, rtx scalar_reg)
{
  /* Test unary cases with VEC_SERIES arguments.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
  rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
  rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
  rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
  rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
  rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
  rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
					 neg_scalar_reg);
  ASSERT_RTX_EQ (series_0_r,
		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
  ASSERT_RTX_EQ (series_r_r,
		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));

  /* Test that a VEC_SERIES with a zero step is simplified away.  */
  ASSERT_RTX_EQ (duplicate,
		 simplify_binary_operation (VEC_SERIES, mode,
					    scalar_reg, const0_rtx));

  /* Test PLUS and MINUS with VEC_SERIES.  */
  rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
  rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
  rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
  ASSERT_RTX_EQ (series_r_r,
		 simplify_binary_operation (PLUS, mode, series_0_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_r,
		 simplify_binary_operation (MINUS, mode, series_r_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
					    constm1_rtx));
}
/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
	{
	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
	  test_vector_ops_duplicate (mode, scalar_reg);
	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
	    test_vector_ops_series (mode, scalar_reg);
	}
    }
}
template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};

/* Test various CONST_POLY_INT properties.  */

template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller arithmetic
     testing is done directly on poly_ints.  */
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}
/* Run all of the selftests within this file.  */

void
simplify_rtx_c_tests ()
{
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}

} // namespace selftest

#endif /* CHECKING_P */