/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "rtl.h"
#include "diagnostic-core.h"
#include "selftest.h"
#include "selftest-rtl.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
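/* For example, HWI_SIGN_EXTEND of a low part whose top bit is set
   yields an all-ones high part, while a non-negative low part yields
   a zero high part, mirroring two's-complement sign extension.  */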
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (!HWI_COMPUTABLE_MODE_P (mode)
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
					   mode);
  return gen_int_mode (val, mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
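/* For instance, for a 32-bit MODE the only constant accepted by
   mode_signbit_p is 0x80000000, i.e. a value with just the most
   significant bit of the mode set.  */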
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
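/* As an example, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X itself, and a commutative operation whose first operand
   is a constant is reordered so that the constant ends up second.  */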
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
	return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x
)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
288 && MEM_OFFSET_KNOWN_P (x
))
290 tree decl
= MEM_EXPR (x
);
291 machine_mode mode
= GET_MODE (x
);
292 poly_int64 offset
= 0;
294 switch (TREE_CODE (decl
))
304 case ARRAY_RANGE_REF
:
309 case VIEW_CONVERT_EXPR
:
311 poly_int64 bitsize
, bitpos
, bytepos
, toffset_val
= 0;
313 int unsignedp
, reversep
, volatilep
= 0;
316 = get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
, &mode
,
317 &unsignedp
, &reversep
, &volatilep
);
318 if (maybe_ne (bitsize
, GET_MODE_BITSIZE (mode
))
319 || !multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
320 || (toffset
&& !poly_int_tree_p (toffset
, &toffset_val
)))
323 offset
+= bytepos
+ toffset_val
;
329 && mode
== GET_MODE (x
)
331 && (TREE_STATIC (decl
)
332 || DECL_THREAD_LOCAL_P (decl
))
333 && DECL_RTL_SET_P (decl
)
334 && MEM_P (DECL_RTL (decl
)))
338 offset
+= MEM_OFFSET (x
);
340 newx
= DECL_RTL (decl
);
344 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
345 poly_int64 n_offset
, o_offset
;
347 /* Avoid creating a new MEM needlessly if we already had
348 the same address. We do if there's no OFFSET and the
349 old address X is identical to NEWX, or if X is of the
350 form (plus NEWX OFFSET), or the NEWX is of the form
351 (plus Y (const_int Z)) and X is that with the offset
352 added: (plus Y (const_int Z+OFFSET)). */
353 n
= strip_offset (n
, &n_offset
);
354 o
= strip_offset (o
, &o_offset
);
355 if (!(known_eq (o_offset
, n_offset
+ offset
)
356 && rtx_equal_p (o
, n
)))
357 x
= adjust_address_nv (newx
, mode
, offset
);
359 else if (GET_MODE (x
) == GET_MODE (newx
)
360 && known_eq (offset
, 0))
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
					 op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
					    op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
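/* E.g. simplify_gen_unary (NEG, SImode, const1_rtx, SImode) folds to
   (const_int -1), whereas an operand that cannot be folded simply
   yields the corresponding (neg:SI ...) rtx.  */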
416 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
417 and simplify the result. If FN is non-NULL, call this callback on each
418 X, if it returns non-NULL, replace X with its return value and simplify the
422 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
423 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
425 enum rtx_code code
= GET_CODE (x
);
426 machine_mode mode
= GET_MODE (x
);
427 machine_mode op_mode
;
429 rtx op0
, op1
, op2
, newx
, op
;
433 if (__builtin_expect (fn
!= NULL
, 0))
435 newx
= fn (x
, old_rtx
, data
);
439 else if (rtx_equal_p (x
, old_rtx
))
440 return copy_rtx ((rtx
) data
);
442 switch (GET_RTX_CLASS (code
))
446 op_mode
= GET_MODE (op0
);
447 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
448 if (op0
== XEXP (x
, 0))
450 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
454 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
455 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
456 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
458 return simplify_gen_binary (code
, mode
, op0
, op1
);
461 case RTX_COMM_COMPARE
:
464 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
465 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
466 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
467 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
469 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
472 case RTX_BITFIELD_OPS
:
474 op_mode
= GET_MODE (op0
);
475 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
476 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
477 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
478 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
480 if (op_mode
== VOIDmode
)
481 op_mode
= GET_MODE (op0
);
482 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
487 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
488 if (op0
== SUBREG_REG (x
))
490 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
491 GET_MODE (SUBREG_REG (x
)),
493 return op0
? op0
: x
;
500 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
501 if (op0
== XEXP (x
, 0))
503 return replace_equiv_address_nv (x
, op0
);
505 else if (code
== LO_SUM
)
507 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
508 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
510 /* (lo_sum (high x) y) -> y where x and y have the same base. */
511 if (GET_CODE (op0
) == HIGH
)
513 rtx base0
, base1
, offset0
, offset1
;
514 split_const (XEXP (op0
, 0), &base0
, &offset0
);
515 split_const (op1
, &base1
, &offset1
);
516 if (rtx_equal_p (base0
, base1
))
520 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
522 return gen_rtx_LO_SUM (mode
, op0
, op1
);
531 fmt
= GET_RTX_FORMAT (code
);
532 for (i
= 0; fmt
[i
]; i
++)
537 newvec
= XVEC (newx
, i
);
538 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
540 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
542 if (op
!= RTVEC_ELT (vec
, j
))
546 newvec
= shallow_copy_rtvec (vec
);
548 newx
= shallow_copy_rtx (x
);
549 XVEC (newx
, i
) = newvec
;
551 RTVEC_ELT (newvec
, j
) = op
;
559 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
560 if (op
!= XEXP (x
, i
))
563 newx
= shallow_copy_rtx (x
);
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
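/* For example, replacing (reg R) with (const_int 0) in
   (plus:SI (reg R) (reg S)) via simplify_replace_rtx yields just
   (reg S), because the replacement is re-simplified on the way up.  */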
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (reg:DI Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
632 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
633 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
634 scalar_int_mode int_mode
, int_op_mode
, subreg_mode
;
636 gcc_assert (precision
<= op_precision
);
638 /* Optimize truncations of zero and sign extended values. */
639 if (GET_CODE (op
) == ZERO_EXTEND
640 || GET_CODE (op
) == SIGN_EXTEND
)
642 /* There are three possibilities. If MODE is the same as the
643 origmode, we can omit both the extension and the subreg.
644 If MODE is not larger than the origmode, we can apply the
645 truncation without the extension. Finally, if the outermode
646 is larger than the origmode, we can just extend to the appropriate
648 machine_mode origmode
= GET_MODE (XEXP (op
, 0));
649 if (mode
== origmode
)
651 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
652 return simplify_gen_unary (TRUNCATE
, mode
,
653 XEXP (op
, 0), origmode
);
655 return simplify_gen_unary (GET_CODE (op
), mode
,
656 XEXP (op
, 0), origmode
);
659 /* If the machine can perform operations in the truncated mode, distribute
660 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
661 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
663 && (!WORD_REGISTER_OPERATIONS
|| precision
>= BITS_PER_WORD
)
664 && (GET_CODE (op
) == PLUS
665 || GET_CODE (op
) == MINUS
666 || GET_CODE (op
) == MULT
))
668 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
671 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
673 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
677 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
678 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
679 the outer subreg is effectively a truncation to the original mode. */
680 if ((GET_CODE (op
) == LSHIFTRT
681 || GET_CODE (op
) == ASHIFTRT
)
682 /* Ensure that OP_MODE is at least twice as wide as MODE
683 to avoid the possibility that an outer LSHIFTRT shifts by more
684 than the sign extension's sign_bit_copies and introduces zeros
685 into the high bits of the result. */
686 && 2 * precision
<= op_precision
687 && CONST_INT_P (XEXP (op
, 1))
688 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
689 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
690 && UINTVAL (XEXP (op
, 1)) < precision
)
691 return simplify_gen_binary (ASHIFTRT
, mode
,
692 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
694 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
695 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
696 the outer subreg is effectively a truncation to the original mode. */
697 if ((GET_CODE (op
) == LSHIFTRT
698 || GET_CODE (op
) == ASHIFTRT
)
699 && CONST_INT_P (XEXP (op
, 1))
700 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
701 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
702 && UINTVAL (XEXP (op
, 1)) < precision
)
703 return simplify_gen_binary (LSHIFTRT
, mode
,
704 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
706 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
707 to (ashift:QI (x:QI) C), where C is a suitable small constant and
708 the outer subreg is effectively a truncation to the original mode. */
709 if (GET_CODE (op
) == ASHIFT
710 && CONST_INT_P (XEXP (op
, 1))
711 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
712 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
713 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
714 && UINTVAL (XEXP (op
, 1)) < precision
)
715 return simplify_gen_binary (ASHIFT
, mode
,
716 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
718 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
719 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
721 if (GET_CODE (op
) == AND
722 && (GET_CODE (XEXP (op
, 0)) == LSHIFTRT
723 || GET_CODE (XEXP (op
, 0)) == ASHIFTRT
)
724 && CONST_INT_P (XEXP (XEXP (op
, 0), 1))
725 && CONST_INT_P (XEXP (op
, 1)))
727 rtx op0
= (XEXP (XEXP (op
, 0), 0));
728 rtx shift_op
= XEXP (XEXP (op
, 0), 1);
729 rtx mask_op
= XEXP (op
, 1);
730 unsigned HOST_WIDE_INT shift
= UINTVAL (shift_op
);
731 unsigned HOST_WIDE_INT mask
= UINTVAL (mask_op
);
733 if (shift
< precision
734 /* If doing this transform works for an X with all bits set,
735 it works for any X. */
736 && ((GET_MODE_MASK (mode
) >> shift
) & mask
)
737 == ((GET_MODE_MASK (op_mode
) >> shift
) & mask
)
738 && (op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, op_mode
))
739 && (op0
= simplify_gen_binary (LSHIFTRT
, mode
, op0
, shift_op
)))
741 mask_op
= GEN_INT (trunc_int_for_mode (mask
, mode
));
742 return simplify_gen_binary (AND
, mode
, op0
, mask_op
);
746 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
747 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
749 if ((GET_CODE (op
) == ZERO_EXTRACT
|| GET_CODE (op
) == SIGN_EXTRACT
)
750 && REG_P (XEXP (op
, 0))
751 && GET_MODE (XEXP (op
, 0)) == GET_MODE (op
)
752 && CONST_INT_P (XEXP (op
, 1))
753 && CONST_INT_P (XEXP (op
, 2)))
755 rtx op0
= XEXP (op
, 0);
756 unsigned HOST_WIDE_INT len
= UINTVAL (XEXP (op
, 1));
757 unsigned HOST_WIDE_INT pos
= UINTVAL (XEXP (op
, 2));
758 if (BITS_BIG_ENDIAN
&& pos
>= op_precision
- precision
)
760 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
763 pos
-= op_precision
- precision
;
764 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
765 XEXP (op
, 1), GEN_INT (pos
));
768 else if (!BITS_BIG_ENDIAN
&& precision
>= len
+ pos
)
770 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
772 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
773 XEXP (op
, 1), XEXP (op
, 2));
777 /* Recognize a word extraction from a multi-word subreg. */
778 if ((GET_CODE (op
) == LSHIFTRT
779 || GET_CODE (op
) == ASHIFTRT
)
780 && SCALAR_INT_MODE_P (mode
)
781 && SCALAR_INT_MODE_P (op_mode
)
782 && precision
>= BITS_PER_WORD
783 && 2 * precision
<= op_precision
784 && CONST_INT_P (XEXP (op
, 1))
785 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
786 && UINTVAL (XEXP (op
, 1)) < op_precision
)
788 poly_int64 byte
= subreg_lowpart_offset (mode
, op_mode
);
789 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
790 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
792 ? byte
- shifted_bytes
793 : byte
+ shifted_bytes
));
796 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
797 and try replacing the TRUNCATE and shift with it. Don't do this
798 if the MEM has a mode-dependent address. */
799 if ((GET_CODE (op
) == LSHIFTRT
800 || GET_CODE (op
) == ASHIFTRT
)
801 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
802 && is_a
<scalar_int_mode
> (op_mode
, &int_op_mode
)
803 && MEM_P (XEXP (op
, 0))
804 && CONST_INT_P (XEXP (op
, 1))
805 && INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (int_mode
) == 0
806 && INTVAL (XEXP (op
, 1)) > 0
807 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (int_op_mode
)
808 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
809 MEM_ADDR_SPACE (XEXP (op
, 0)))
810 && ! MEM_VOLATILE_P (XEXP (op
, 0))
811 && (GET_MODE_SIZE (int_mode
) >= UNITS_PER_WORD
812 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
814 poly_int64 byte
= subreg_lowpart_offset (int_mode
, int_op_mode
);
815 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
816 return adjust_address_nv (XEXP (op
, 0), int_mode
,
818 ? byte
- shifted_bytes
819 : byte
+ shifted_bytes
));
822 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
823 (OP:SI foo:SI) if OP is NEG or ABS. */
824 if ((GET_CODE (op
) == ABS
825 || GET_CODE (op
) == NEG
)
826 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
827 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
828 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
829 return simplify_gen_unary (GET_CODE (op
), mode
,
830 XEXP (XEXP (op
, 0), 0), mode
);
832 /* (truncate:A (subreg:B (truncate:C X) 0)) is
834 if (GET_CODE (op
) == SUBREG
835 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
836 && SCALAR_INT_MODE_P (op_mode
)
837 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &subreg_mode
)
838 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
839 && subreg_lowpart_p (op
))
841 rtx inner
= XEXP (SUBREG_REG (op
), 0);
842 if (GET_MODE_PRECISION (int_mode
) <= GET_MODE_PRECISION (subreg_mode
))
843 return simplify_gen_unary (TRUNCATE
, int_mode
, inner
,
846 /* If subreg above is paradoxical and C is narrower
847 than A, return (subreg:A (truncate:C X) 0). */
848 return simplify_gen_subreg (int_mode
, SUBREG_REG (op
), subreg_mode
, 0);
851 /* (truncate:A (truncate:B X)) is (truncate:A X). */
852 if (GET_CODE (op
) == TRUNCATE
)
853 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
854 GET_MODE (XEXP (op
, 0)));
856 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
858 if (GET_CODE (op
) == IOR
859 && SCALAR_INT_MODE_P (mode
)
860 && SCALAR_INT_MODE_P (op_mode
)
861 && CONST_INT_P (XEXP (op
, 1))
862 && trunc_int_for_mode (INTVAL (XEXP (op
, 1)), mode
) == -1)
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
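/* E.g. simplify_unary_operation (NOT, SImode, const0_rtx, SImode)
   returns (const_int -1), while operands that cannot be folded are
   passed on to simplify_unary_operation_1 below.  */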
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
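/* For instance, an SImode value known to need at most 24 bits
   converts exactly to SFmode, whose significand is 24 bits wide,
   so a FLOAT of it is known to be exact.  */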
912 /* Perform some simplifications we can do even if the operands
915 simplify_unary_operation_1 (enum rtx_code code
, machine_mode mode
, rtx op
)
917 enum rtx_code reversed
;
918 rtx temp
, elt
, base
, step
;
919 scalar_int_mode inner
, int_mode
, op_mode
, op0_mode
;
924 /* (not (not X)) == X. */
925 if (GET_CODE (op
) == NOT
)
928 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
929 comparison is all ones. */
930 if (COMPARISON_P (op
)
931 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
932 && ((reversed
= reversed_comparison_code (op
, NULL
)) != UNKNOWN
))
933 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
934 XEXP (op
, 0), XEXP (op
, 1));
936 /* (not (plus X -1)) can become (neg X). */
937 if (GET_CODE (op
) == PLUS
938 && XEXP (op
, 1) == constm1_rtx
)
939 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
941 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
942 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
943 and MODE_VECTOR_INT. */
944 if (GET_CODE (op
) == NEG
&& CONSTM1_RTX (mode
))
945 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
948 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
949 if (GET_CODE (op
) == XOR
950 && CONST_INT_P (XEXP (op
, 1))
951 && (temp
= simplify_unary_operation (NOT
, mode
,
952 XEXP (op
, 1), mode
)) != 0)
953 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
955 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
956 if (GET_CODE (op
) == PLUS
957 && CONST_INT_P (XEXP (op
, 1))
958 && mode_signbit_p (mode
, XEXP (op
, 1))
959 && (temp
= simplify_unary_operation (NOT
, mode
,
960 XEXP (op
, 1), mode
)) != 0)
961 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
964 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
965 operands other than 1, but that is not valid. We could do a
966 similar simplification for (not (lshiftrt C X)) where C is
967 just the sign bit, but this doesn't seem common enough to
969 if (GET_CODE (op
) == ASHIFT
970 && XEXP (op
, 0) == const1_rtx
)
972 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
973 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
976 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
977 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
978 so we can perform the above simplification. */
979 if (STORE_FLAG_VALUE
== -1
980 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
981 && GET_CODE (op
) == ASHIFTRT
982 && CONST_INT_P (XEXP (op
, 1))
983 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
984 return simplify_gen_relational (GE
, int_mode
, VOIDmode
,
985 XEXP (op
, 0), const0_rtx
);
988 if (partial_subreg_p (op
)
989 && subreg_lowpart_p (op
)
990 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
991 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
993 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
996 x
= gen_rtx_ROTATE (inner_mode
,
997 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
999 XEXP (SUBREG_REG (op
), 1));
1000 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
1005 /* Apply De Morgan's laws to reduce number of patterns for machines
1006 with negating logical insns (and-not, nand, etc.). If result has
1007 only one NOT, put it first, since that is how the patterns are
1009 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
1011 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
1012 machine_mode op_mode
;
1014 op_mode
= GET_MODE (in1
);
1015 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
1017 op_mode
= GET_MODE (in2
);
1018 if (op_mode
== VOIDmode
)
1020 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
1022 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
1023 std::swap (in1
, in2
);
1025 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
1029 /* (not (bswap x)) -> (bswap (not x)). */
1030 if (GET_CODE (op
) == BSWAP
)
1032 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1033 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
1038 /* (neg (neg X)) == X. */
1039 if (GET_CODE (op
) == NEG
)
1040 return XEXP (op
, 0);
1042 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1043 If comparison is not reversible use
1045 if (GET_CODE (op
) == IF_THEN_ELSE
)
1047 rtx cond
= XEXP (op
, 0);
1048 rtx true_rtx
= XEXP (op
, 1);
1049 rtx false_rtx
= XEXP (op
, 2);
1051 if ((GET_CODE (true_rtx
) == NEG
1052 && rtx_equal_p (XEXP (true_rtx
, 0), false_rtx
))
1053 || (GET_CODE (false_rtx
) == NEG
1054 && rtx_equal_p (XEXP (false_rtx
, 0), true_rtx
)))
1056 if (reversed_comparison_code (cond
, NULL
) != UNKNOWN
)
1057 temp
= reversed_comparison (cond
, mode
);
1061 std::swap (true_rtx
, false_rtx
);
1063 return simplify_gen_ternary (IF_THEN_ELSE
, mode
,
1064 mode
, temp
, true_rtx
, false_rtx
);
1068 /* (neg (plus X 1)) can become (not X). */
1069 if (GET_CODE (op
) == PLUS
1070 && XEXP (op
, 1) == const1_rtx
)
1071 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1073 /* Similarly, (neg (not X)) is (plus X 1). */
1074 if (GET_CODE (op
) == NOT
)
1075 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
1078 /* (neg (minus X Y)) can become (minus Y X). This transformation
1079 isn't safe for modes with signed zeros, since if X and Y are
1080 both +0, (minus Y X) is the same as (minus X Y). If the
1081 rounding mode is towards +infinity (or -infinity) then the two
1082 expressions will be rounded differently. */
1083 if (GET_CODE (op
) == MINUS
1084 && !HONOR_SIGNED_ZEROS (mode
)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1086 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
1088 if (GET_CODE (op
) == PLUS
1089 && !HONOR_SIGNED_ZEROS (mode
)
1090 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1092 /* (neg (plus A C)) is simplified to (minus -C A). */
1093 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
1094 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
1096 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
1098 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
1101 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1102 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1103 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1106 /* (neg (mult A B)) becomes (mult A (neg B)).
1107 This works even for floating-point values. */
1108 if (GET_CODE (op
) == MULT
1109 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1111 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1112 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1115 /* NEG commutes with ASHIFT since it is multiplication. Only do
1116 this if we can then eliminate the NEG (e.g., if the operand
1118 if (GET_CODE (op
) == ASHIFT
)
1120 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1122 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1125 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1126 C is equal to the width of MODE minus 1. */
1127 if (GET_CODE (op
) == ASHIFTRT
1128 && CONST_INT_P (XEXP (op
, 1))
1129 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1130 return simplify_gen_binary (LSHIFTRT
, mode
,
1131 XEXP (op
, 0), XEXP (op
, 1));
1133 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1134 C is equal to the width of MODE minus 1. */
1135 if (GET_CODE (op
) == LSHIFTRT
1136 && CONST_INT_P (XEXP (op
, 1))
1137 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1138 return simplify_gen_binary (ASHIFTRT
, mode
,
1139 XEXP (op
, 0), XEXP (op
, 1));
1141 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1142 if (GET_CODE (op
) == XOR
1143 && XEXP (op
, 1) == const1_rtx
1144 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1145 return plus_constant (mode
, XEXP (op
, 0), -1);
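      /* To see why, note that if A is known to be 0 or 1 then A ^ 1 is
	 1 - A, so -(A ^ 1) is A - 1, i.e. (plus A -1).  */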
1147 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1148 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1149 if (GET_CODE (op
) == LT
1150 && XEXP (op
, 1) == const0_rtx
1151 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op
, 0)), &inner
))
1153 int_mode
= as_a
<scalar_int_mode
> (mode
);
1154 int isize
= GET_MODE_PRECISION (inner
);
1155 if (STORE_FLAG_VALUE
== 1)
1157 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1158 gen_int_shift_amount (inner
,
1160 if (int_mode
== inner
)
1162 if (GET_MODE_PRECISION (int_mode
) > isize
)
1163 return simplify_gen_unary (SIGN_EXTEND
, int_mode
, temp
, inner
);
1164 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1166 else if (STORE_FLAG_VALUE
== -1)
1168 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1169 gen_int_shift_amount (inner
,
1171 if (int_mode
== inner
)
1173 if (GET_MODE_PRECISION (int_mode
) > isize
)
1174 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, temp
, inner
);
1175 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1179 if (vec_series_p (op
, &base
, &step
))
1181 /* Only create a new series if we can simplify both parts. In other
1182 cases this isn't really a simplification, and it's not necessarily
1183 a win to replace a vector operation with a scalar operation. */
1184 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
1185 base
= simplify_unary_operation (NEG
, inner_mode
, base
, inner_mode
);
1188 step
= simplify_unary_operation (NEG
, inner_mode
,
1191 return gen_vec_series (mode
, base
, step
);
1197 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1198 with the umulXi3_highpart patterns. */
1199 if (GET_CODE (op
) == LSHIFTRT
1200 && GET_CODE (XEXP (op
, 0)) == MULT
)
1203 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1205 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1207 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1211 /* We can't handle truncation to a partial integer mode here
1212 because we don't know the real bitsize of the partial
1217 if (GET_MODE (op
) != VOIDmode
)
1219 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1224 /* If we know that the value is already truncated, we can
1225 replace the TRUNCATE with a SUBREG. */
1226 if (known_eq (GET_MODE_NUNITS (mode
), 1)
1227 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1228 || truncated_to_mode (mode
, op
)))
1230 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1235 /* A truncate of a comparison can be replaced with a subreg if
1236 STORE_FLAG_VALUE permits. This is like the previous test,
1237 but it works even if the comparison is done in a mode larger
1238 than HOST_BITS_PER_WIDE_INT. */
1239 if (HWI_COMPUTABLE_MODE_P (mode
)
1240 && COMPARISON_P (op
)
1241 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1243 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1248 /* A truncate of a memory is just loading the low part of the memory
1249 if we are not changing the meaning of the address. */
1250 if (GET_CODE (op
) == MEM
1251 && !VECTOR_MODE_P (mode
)
1252 && !MEM_VOLATILE_P (op
)
1253 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1255 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1262 case FLOAT_TRUNCATE
:
1263 if (DECIMAL_FLOAT_MODE_P (mode
))
1266 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1267 if (GET_CODE (op
) == FLOAT_EXTEND
1268 && GET_MODE (XEXP (op
, 0)) == mode
)
1269 return XEXP (op
, 0);
1271 /* (float_truncate:SF (float_truncate:DF foo:XF))
1272 = (float_truncate:SF foo:XF).
1273 This may eliminate double rounding, so it is unsafe.
1275 (float_truncate:SF (float_extend:XF foo:DF))
1276 = (float_truncate:SF foo:DF).
1278 (float_truncate:DF (float_extend:XF foo:SF))
1279 = (float_extend:DF foo:SF). */
1280 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1281 && flag_unsafe_math_optimizations
)
1282 || GET_CODE (op
) == FLOAT_EXTEND
)
1283 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)))
1284 > GET_MODE_UNIT_SIZE (mode
)
1285 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1287 XEXP (op
, 0), mode
);
1289 /* (float_truncate (float x)) is (float x) */
1290 if ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1291 && (flag_unsafe_math_optimizations
1292 || exact_int_to_float_conversion_p (op
)))
1293 return simplify_gen_unary (GET_CODE (op
), mode
,
1295 GET_MODE (XEXP (op
, 0)));
1297 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1298 (OP:SF foo:SF) if OP is NEG or ABS. */
1299 if ((GET_CODE (op
) == ABS
1300 || GET_CODE (op
) == NEG
)
1301 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1302 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1303 return simplify_gen_unary (GET_CODE (op
), mode
,
1304 XEXP (XEXP (op
, 0), 0), mode
);
1306 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1307 is (float_truncate:SF x). */
1308 if (GET_CODE (op
) == SUBREG
1309 && subreg_lowpart_p (op
)
1310 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1311 return SUBREG_REG (op
);
1315 if (DECIMAL_FLOAT_MODE_P (mode
))
1318 /* (float_extend (float_extend x)) is (float_extend x)
1320 (float_extend (float x)) is (float x) assuming that double
1321 rounding can't happen.
1323 if (GET_CODE (op
) == FLOAT_EXTEND
1324 || ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1325 && exact_int_to_float_conversion_p (op
)))
1326 return simplify_gen_unary (GET_CODE (op
), mode
,
1328 GET_MODE (XEXP (op
, 0)));
1333 /* (abs (neg <foo>)) -> (abs <foo>) */
1334 if (GET_CODE (op
) == NEG
)
1335 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1336 GET_MODE (XEXP (op
, 0)));
1338 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1340 if (GET_MODE (op
) == VOIDmode
)
1343 /* If operand is something known to be positive, ignore the ABS. */
1344 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1345 || val_signbit_known_clear_p (GET_MODE (op
),
1346 nonzero_bits (op
, GET_MODE (op
))))
1349 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1350 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
1351 && (num_sign_bit_copies (op
, int_mode
)
1352 == GET_MODE_PRECISION (int_mode
)))
1353 return gen_rtx_NEG (int_mode
, op
);
1358 /* (ffs (*_extend <X>)) = (ffs <X>) */
1359 if (GET_CODE (op
) == SIGN_EXTEND
1360 || GET_CODE (op
) == ZERO_EXTEND
)
1361 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1362 GET_MODE (XEXP (op
, 0)));
1366 switch (GET_CODE (op
))
1370 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1371 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1372 GET_MODE (XEXP (op
, 0)));
1376 /* Rotations don't affect popcount. */
1377 if (!side_effects_p (XEXP (op
, 1)))
1378 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1379 GET_MODE (XEXP (op
, 0)));
1388 switch (GET_CODE (op
))
1394 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1395 GET_MODE (XEXP (op
, 0)));
1399 /* Rotations don't affect parity. */
1400 if (!side_effects_p (XEXP (op
, 1)))
1401 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1402 GET_MODE (XEXP (op
, 0)));
1411 /* (bswap (bswap x)) -> x. */
1412 if (GET_CODE (op
) == BSWAP
)
1413 return XEXP (op
, 0);
1417 /* (float (sign_extend <X>)) = (float <X>). */
1418 if (GET_CODE (op
) == SIGN_EXTEND
)
1419 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1420 GET_MODE (XEXP (op
, 0)));
1424 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1425 becomes just the MINUS if its mode is MODE. This allows
1426 folding switch statements on machines using casesi (such as
1428 if (GET_CODE (op
) == TRUNCATE
1429 && GET_MODE (XEXP (op
, 0)) == mode
1430 && GET_CODE (XEXP (op
, 0)) == MINUS
1431 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1432 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1433 return XEXP (op
, 0);
1435 /* Extending a widening multiplication should be canonicalized to
1436 a wider widening multiplication. */
1437 if (GET_CODE (op
) == MULT
)
1439 rtx lhs
= XEXP (op
, 0);
1440 rtx rhs
= XEXP (op
, 1);
1441 enum rtx_code lcode
= GET_CODE (lhs
);
1442 enum rtx_code rcode
= GET_CODE (rhs
);
1444 /* Widening multiplies usually extend both operands, but sometimes
1445 they use a shift to extract a portion of a register. */
1446 if ((lcode
== SIGN_EXTEND
1447 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1448 && (rcode
== SIGN_EXTEND
1449 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1451 machine_mode lmode
= GET_MODE (lhs
);
1452 machine_mode rmode
= GET_MODE (rhs
);
1455 if (lcode
== ASHIFTRT
)
1456 /* Number of bits not shifted off the end. */
1457 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1458 - INTVAL (XEXP (lhs
, 1)));
1459 else /* lcode == SIGN_EXTEND */
1460 /* Size of inner mode. */
1461 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1463 if (rcode
== ASHIFTRT
)
1464 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1465 - INTVAL (XEXP (rhs
, 1)));
1466 else /* rcode == SIGN_EXTEND */
1467 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
	  /* We can only widen multiplies if the result is mathematically
	     equivalent.  I.e. if overflow was impossible.  */
1471 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1472 return simplify_gen_binary
1474 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1475 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
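      /* For example, (sign_extend:DI (mult:SI (sign_extend:SI (x:HI))
	 (sign_extend:SI (y:HI)))) needs at most 16 + 16 = 32 bits, so it
	 can become (mult:DI (sign_extend:DI (x:HI))
	 (sign_extend:DI (y:HI))).  */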
1479 /* Check for a sign extension of a subreg of a promoted
1480 variable, where the promotion is sign-extended, and the
1481 target mode is the same as the variable's promotion. */
1482 if (GET_CODE (op
) == SUBREG
1483 && SUBREG_PROMOTED_VAR_P (op
)
1484 && SUBREG_PROMOTED_SIGNED_P (op
)
1485 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1487 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1492 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1493 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1494 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1496 gcc_assert (GET_MODE_UNIT_PRECISION (mode
)
1497 > GET_MODE_UNIT_PRECISION (GET_MODE (op
)));
1498 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1499 GET_MODE (XEXP (op
, 0)));
1502 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1503 is (sign_extend:M (subreg:O <X>)) if there is mode with
1504 GET_MODE_BITSIZE (N) - I bits.
1505 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1506 is similarly (zero_extend:M (subreg:O <X>)). */
1507 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1508 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1509 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1510 && CONST_INT_P (XEXP (op
, 1))
1511 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1512 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1513 GET_MODE_BITSIZE (op_mode
) > INTVAL (XEXP (op
, 1))))
1515 scalar_int_mode tmode
;
1516 gcc_assert (GET_MODE_BITSIZE (int_mode
)
1517 > GET_MODE_BITSIZE (op_mode
));
1518 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode
)
1519 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1522 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1524 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1525 ? SIGN_EXTEND
: ZERO_EXTEND
,
1526 int_mode
, inner
, tmode
);
1530 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1531 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1532 if (GET_CODE (op
) == LSHIFTRT
1533 && CONST_INT_P (XEXP (op
, 1))
1534 && XEXP (op
, 1) != const0_rtx
)
1535 return simplify_gen_unary (ZERO_EXTEND
, mode
, op
, GET_MODE (op
));
1537 #if defined(POINTERS_EXTEND_UNSIGNED)
1538 /* As we do not know which address space the pointer is referring to,
1539 we can do this only if the target does not support different pointer
1540 or address modes depending on the address space. */
1541 if (target_default_pointer_address_modes_p ()
1542 && ! POINTERS_EXTEND_UNSIGNED
1543 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1545 || (GET_CODE (op
) == SUBREG
1546 && REG_P (SUBREG_REG (op
))
1547 && REG_POINTER (SUBREG_REG (op
))
1548 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1549 && !targetm
.have_ptr_extend ())
1552 = convert_memory_address_addr_space_1 (Pmode
, op
,
1553 ADDR_SPACE_GENERIC
, false,
1562 /* Check for a zero extension of a subreg of a promoted
1563 variable, where the promotion is zero-extended, and the
1564 target mode is the same as the variable's promotion. */
1565 if (GET_CODE (op
) == SUBREG
1566 && SUBREG_PROMOTED_VAR_P (op
)
1567 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1568 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1570 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1575 /* Extending a widening multiplication should be canonicalized to
1576 a wider widening multiplication. */
1577 if (GET_CODE (op
) == MULT
)
1579 rtx lhs
= XEXP (op
, 0);
1580 rtx rhs
= XEXP (op
, 1);
1581 enum rtx_code lcode
= GET_CODE (lhs
);
1582 enum rtx_code rcode
= GET_CODE (rhs
);
1584 /* Widening multiplies usually extend both operands, but sometimes
1585 they use a shift to extract a portion of a register. */
1586 if ((lcode
== ZERO_EXTEND
1587 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1588 && (rcode
== ZERO_EXTEND
1589 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1591 machine_mode lmode
= GET_MODE (lhs
);
1592 machine_mode rmode
= GET_MODE (rhs
);
1595 if (lcode
== LSHIFTRT
)
1596 /* Number of bits not shifted off the end. */
1597 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1598 - INTVAL (XEXP (lhs
, 1)));
1599 else /* lcode == ZERO_EXTEND */
1600 /* Size of inner mode. */
1601 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1603 if (rcode
== LSHIFTRT
)
1604 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1605 - INTVAL (XEXP (rhs
, 1)));
1606 else /* rcode == ZERO_EXTEND */
1607 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
	  /* We can only widen multiplies if the result is mathematically
	     equivalent.  I.e. if overflow was impossible.  */
1611 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1612 return simplify_gen_binary
1614 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1615 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1619 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1620 if (GET_CODE (op
) == ZERO_EXTEND
)
1621 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1622 GET_MODE (XEXP (op
, 0)));
1624 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1625 is (zero_extend:M (subreg:O <X>)) if there is mode with
1626 GET_MODE_PRECISION (N) - I bits. */
1627 if (GET_CODE (op
) == LSHIFTRT
1628 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1629 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1630 && CONST_INT_P (XEXP (op
, 1))
1631 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1632 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1633 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1635 scalar_int_mode tmode
;
1636 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1637 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1640 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1642 return simplify_gen_unary (ZERO_EXTEND
, int_mode
,
1647 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1648 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1650 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1651 (and:SI (reg:SI) (const_int 63)). */
1652 if (partial_subreg_p (op
)
1653 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1654 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &op0_mode
)
1655 && GET_MODE_PRECISION (op0_mode
) <= HOST_BITS_PER_WIDE_INT
1656 && GET_MODE_PRECISION (int_mode
) >= GET_MODE_PRECISION (op0_mode
)
1657 && subreg_lowpart_p (op
)
1658 && (nonzero_bits (SUBREG_REG (op
), op0_mode
)
1659 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1661 if (GET_MODE_PRECISION (int_mode
) == GET_MODE_PRECISION (op0_mode
))
1662 return SUBREG_REG (op
);
1663 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, SUBREG_REG (op
),
1667 #if defined(POINTERS_EXTEND_UNSIGNED)
1668 /* As we do not know which address space the pointer is referring to,
1669 we can do this only if the target does not support different pointer
1670 or address modes depending on the address space. */
1671 if (target_default_pointer_address_modes_p ()
1672 && POINTERS_EXTEND_UNSIGNED
> 0
1673 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1675 || (GET_CODE (op
) == SUBREG
1676 && REG_P (SUBREG_REG (op
))
1677 && REG_POINTER (SUBREG_REG (op
))
1678 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1679 && !targetm
.have_ptr_extend ())
1682 = convert_memory_address_addr_space_1 (Pmode
, op
,
1683 ADDR_SPACE_GENERIC
, false,
1695 if (VECTOR_MODE_P (mode
) && vec_duplicate_p (op
, &elt
))
1697 /* Try applying the operator to ELT and see if that simplifies.
1698 We can duplicate the result if so.
1700 The reason we don't use simplify_gen_unary is that it isn't
1701 necessarily a win to convert things like:
1703 (neg:V (vec_duplicate:V (reg:S R)))
1707 (vec_duplicate:V (neg:S (reg:S R)))
1709 The first might be done entirely in vector registers while the
1710 second might need a move between register files. */
1711 temp
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1712 elt
, GET_MODE_INNER (GET_MODE (op
)));
1714 return gen_vec_duplicate (mode
, temp
);
1720 /* Try to compute the value of a unary operation CODE whose output mode is to
1721 be MODE with input operand OP whose mode was originally OP_MODE.
1722 Return zero if the value cannot be computed. */
1724 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1725 rtx op
, machine_mode op_mode
)
1727 scalar_int_mode result_mode
;
1729 if (code
== VEC_DUPLICATE
)
1731 gcc_assert (VECTOR_MODE_P (mode
));
1732 if (GET_MODE (op
) != VOIDmode
)
1734 if (!VECTOR_MODE_P (GET_MODE (op
)))
1735 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1737 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1740 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
))
1741 return gen_const_vec_duplicate (mode
, op
);
1742 unsigned int n_elts
;
1743 if (GET_CODE (op
) == CONST_VECTOR
1744 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
1746 /* This must be constant if we're duplicating it to a constant
1747 number of elements. */
1748 unsigned int in_n_elts
= CONST_VECTOR_NUNITS (op
).to_constant ();
1749 gcc_assert (in_n_elts
< n_elts
);
1750 gcc_assert ((n_elts
% in_n_elts
) == 0);
1751 rtvec v
= rtvec_alloc (n_elts
);
1752 for (unsigned i
= 0; i
< n_elts
; i
++)
1753 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1754 return gen_rtx_CONST_VECTOR (mode
, v
);
1758 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1760 unsigned int n_elts
;
1761 if (!CONST_VECTOR_NUNITS (op
).is_constant (&n_elts
))
1764 machine_mode opmode
= GET_MODE (op
);
1765 gcc_assert (known_eq (GET_MODE_NUNITS (mode
), n_elts
));
1766 gcc_assert (known_eq (GET_MODE_NUNITS (opmode
), n_elts
));
1768 rtvec v
= rtvec_alloc (n_elts
);
1771 for (i
= 0; i
< n_elts
; i
++)
1773 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1774 CONST_VECTOR_ELT (op
, i
),
1775 GET_MODE_INNER (opmode
));
1776 if (!x
|| !valid_for_const_vector_p (mode
, x
))
1778 RTVEC_ELT (v
, i
) = x
;
1780 return gen_rtx_CONST_VECTOR (mode
, v
);
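  /* So, for instance, (neg:V4SI (const_vector [1 2 3 4])) folds
     element by element to (const_vector [-1 -2 -3 -4]).  */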
1783 /* The order of these tests is critical so that, for example, we don't
1784 check the wrong mode (input vs. output) for a conversion operation,
1785 such as FIX. At some point, this should be simplified. */
1787 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1791 if (op_mode
== VOIDmode
)
1793 /* CONST_INT have VOIDmode as the mode. We assume that all
1794 the bits of the constant are significant, though, this is
1795 a dangerous assumption as many times CONST_INTs are
1796 created and used with garbage in the bits outside of the
1797 precision of the implied mode of the const_int. */
1798 op_mode
= MAX_MODE_INT
;
1801 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), SIGNED
);
1803 /* Avoid the folding if flag_signaling_nans is on and
1804 operand is a signaling NaN. */
1805 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1808 d
= real_value_truncate (mode
, d
);
1809 return const_double_from_real_value (d
, mode
);
1811 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1815 if (op_mode
== VOIDmode
)
1817 /* CONST_INT have VOIDmode as the mode. We assume that all
1818 the bits of the constant are significant, though, this is
1819 a dangerous assumption as many times CONST_INTs are
1820 created and used with garbage in the bits outside of the
1821 precision of the implied mode of the const_int. */
1822 op_mode
= MAX_MODE_INT
;
1825 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), UNSIGNED
);
1827 /* Avoid the folding if flag_signaling_nans is on and
1828 operand is a signaling NaN. */
1829 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1832 d
= real_value_truncate (mode
, d
);
1833 return const_double_from_real_value (d
, mode
);
1836 if (CONST_SCALAR_INT_P (op
) && is_a
<scalar_int_mode
> (mode
, &result_mode
))
1838 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1840 scalar_int_mode imode
= (op_mode
== VOIDmode
1842 : as_a
<scalar_int_mode
> (op_mode
));
1843 rtx_mode_t op0
= rtx_mode_t (op
, imode
);
1846 #if TARGET_SUPPORTS_WIDE_INT == 0
1847 /* This assert keeps the simplification from producing a result
1848 that cannot be represented in a CONST_DOUBLE but a lot of
1849 upstream callers expect that this function never fails to
1850 simplify something and so you if you added this to the test
1851 above the code would die later anyway. If this assert
1852 happens, you just need to make the port support wide int. */
1853 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1859 result
= wi::bit_not (op0
);
1863 result
= wi::neg (op0
);
1867 result
= wi::abs (op0
);
1871 result
= wi::shwi (wi::ffs (op0
), result_mode
);
1875 if (wi::ne_p (op0
, 0))
1876 int_value
= wi::clz (op0
);
1877 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1878 int_value
= GET_MODE_PRECISION (imode
);
1879 result
= wi::shwi (int_value
, result_mode
);
1883 result
= wi::shwi (wi::clrsb (op0
), result_mode
);
1887 if (wi::ne_p (op0
, 0))
1888 int_value
= wi::ctz (op0
);
1889 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1890 int_value
= GET_MODE_PRECISION (imode
);
1891 result
= wi::shwi (int_value
, result_mode
);
1895 result
= wi::shwi (wi::popcount (op0
), result_mode
);
1899 result
= wi::shwi (wi::parity (op0
), result_mode
);
1903 result
= wide_int (op0
).bswap ();
1908 result
= wide_int::from (op0
, width
, UNSIGNED
);
1912 result
= wide_int::from (op0
, width
, SIGNED
);
1920 return immed_wide_int_const (result
, result_mode
);
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);

      switch (code)
	{
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the ABI of real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }
  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
	{
	case NEG:
	  result = -const_poly_int_value (op);
	  break;

	case NOT:
	  result = ~const_poly_int_value (op);
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
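/* Illustrative sketch (not part of the original source): both rules above
   rely on byte swapping being a pure byte permutation, so it distributes
   over bitwise AND/IOR/XOR.  A minimal stand-alone check of the first
   rule, assuming 32-bit values and GCC's __builtin_bswap32:

       #include <assert.h>
       #include <stdint.h>

       int
       main (void)
       {
	 uint32_t x = 0x12345678u, c1 = 0x00ff00ffu;
	 uint32_t c2 = __builtin_bswap32 (c1);
	 // (bswap x) op c1 == bswap (x op c2), for op in { &, |, ^ }.
	 assert ((__builtin_bswap32 (x) & c1) == __builtin_bswap32 (x & c2));
	 assert ((__builtin_bswap32 (x) ^ c1) == __builtin_bswap32 (x ^ c2));
	 return 0;
       }
*/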
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

static rtx
simplify_binary_operation_series (rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
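/* Illustrative sketch (not part of the original source): for PLUS, applying
   CODE to the bases and steps is enough because the i-th element of a
   series is base + i * step, so the elementwise sum of two series is again
   a series.  A minimal stand-alone check with small example values:

       #include <assert.h>

       int
       main (void)
       {
	 int base0 = 3, step0 = 2, base1 = 10, step1 = 5;
	 for (int i = 0; i < 8; i++)
	   {
	     int lhs = (base0 + i * step0) + (base1 + i * step1);
	     int rhs = (base0 + base1) + i * (step0 + step1);
	     assert (lhs == rhs);
	   }
	 return 0;
       }
*/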
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;
  poly_int64 offset;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
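      /* Illustrative sketch (not part of the original source): the rule
	 above is the two's-complement identity ~a + 1 == -a, which is how
	 negation is defined in a wrapping integer mode.  A minimal
	 stand-alone check:

	     #include <assert.h>

	     int
	     main (void)
	     {
	       for (int a = -5; a <= 5; a++)
		 assert (~a + 1 == -a);
	       return 0;
	     }
      */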
      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */
2287 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2289 rtx lhs
= op0
, rhs
= op1
;
2291 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2292 wide_int coeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2294 if (GET_CODE (lhs
) == NEG
)
2296 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2297 lhs
= XEXP (lhs
, 0);
2299 else if (GET_CODE (lhs
) == MULT
2300 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2302 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2303 lhs
= XEXP (lhs
, 0);
2305 else if (GET_CODE (lhs
) == ASHIFT
2306 && CONST_INT_P (XEXP (lhs
, 1))
2307 && INTVAL (XEXP (lhs
, 1)) >= 0
2308 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2310 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2311 GET_MODE_PRECISION (int_mode
));
2312 lhs
= XEXP (lhs
, 0);
2315 if (GET_CODE (rhs
) == NEG
)
2317 coeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2318 rhs
= XEXP (rhs
, 0);
2320 else if (GET_CODE (rhs
) == MULT
2321 && CONST_INT_P (XEXP (rhs
, 1)))
2323 coeff1
= rtx_mode_t (XEXP (rhs
, 1), int_mode
);
2324 rhs
= XEXP (rhs
, 0);
2326 else if (GET_CODE (rhs
) == ASHIFT
2327 && CONST_INT_P (XEXP (rhs
, 1))
2328 && INTVAL (XEXP (rhs
, 1)) >= 0
2329 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2331 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2332 GET_MODE_PRECISION (int_mode
));
2333 rhs
= XEXP (rhs
, 0);
2336 if (rtx_equal_p (lhs
, rhs
))
2338 rtx orig
= gen_rtx_PLUS (int_mode
, op0
, op1
);
2340 bool speed
= optimize_function_for_speed_p (cfun
);
2342 coeff
= immed_wide_int_const (coeff0
+ coeff1
, int_mode
);
2344 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2345 return (set_src_cost (tem
, int_mode
, speed
)
2346 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));
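      /* Illustrative sketch (not part of the original source): adding the
	 sign bit in a wrapping mode can only flip the sign bit (the carry
	 out of the top bit is discarded), so it behaves exactly like an
	 XOR with the sign bit, and the two XOR constants can then be
	 merged.  A minimal stand-alone check for a 32-bit mode:

	     #include <assert.h>
	     #include <stdint.h>

	     int
	     main (void)
	     {
	       const uint32_t c1 = 0x1234u, c2 = 0x80000000u;  // c2 is the sign bit
	       const uint32_t tests[] = { 0u, 1u, 0x7fffffffu,
					  0x80000000u, 0xdeadbeefu };
	       for (unsigned i = 0; i < sizeof tests / sizeof tests[0]; i++)
		 {
		   uint32_t x = tests[i];
		   assert (((x ^ c1) + c2) == (x ^ (c1 ^ c2)));
		 }
	       return 0;
	     }
      */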
2359 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2360 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2361 && GET_CODE (op0
) == MULT
2362 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2366 in1
= XEXP (XEXP (op0
, 0), 0);
2367 in2
= XEXP (op0
, 1);
2368 return simplify_gen_binary (MINUS
, mode
, op1
,
2369 simplify_gen_binary (MULT
, mode
,
2373 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2374 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2376 if (COMPARISON_P (op0
)
2377 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2378 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2379 && (reversed
= reversed_comparison (op0
, mode
)))
2381 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
2395 /* Reassociate floating point addition only when the user
2396 specifies associative math operations. */
2397 if (FLOAT_MODE_P (mode
)
2398 && flag_associative_math
)
2400 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2405 /* Handle vector series. */
2406 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2408 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2415 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2416 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2417 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2418 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2420 rtx xop00
= XEXP (op0
, 0);
2421 rtx xop10
= XEXP (op1
, 0);
2423 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2426 if (REG_P (xop00
) && REG_P (xop10
)
2427 && REGNO (xop00
) == REGNO (xop10
)
2428 && GET_MODE (xop00
) == mode
2429 && GET_MODE (xop10
) == mode
2430 && GET_MODE_CLASS (mode
) == MODE_CC
)
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);
      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);
      /* (-1 - a) is ~a, unless the expression contains symbolic
	 constants, in which case not retaining additions and
	 subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
	  && !contains_symbolic_reference_p (op1))
	return simplify_gen_unary (NOT, mode, op1, mode);
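      /* Illustrative sketch (not part of the original source): this is the
	 two's-complement identity -1 - a == ~a (since ~a == -a - 1).
	 A minimal stand-alone check:

	     #include <assert.h>

	     int
	     main (void)
	     {
	       for (int a = -5; a <= 5; a++)
		 assert (-1 - a == ~a);
	       return 0;
	     }
      */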
2458 /* Subtracting 0 has no effect unless the mode has signed zeros
2459 and supports rounding towards -infinity. In such a case,
2461 if (!(HONOR_SIGNED_ZEROS (mode
)
2462 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2463 && trueop1
== CONST0_RTX (mode
))
2466 /* See if this is something like X * C - X or vice versa or
2467 if the multiplication is written as a shift. If so, we can
2468 distribute and make a new multiply, shift, or maybe just
2469 have X (if C is 2 in the example above). But don't make
2470 something more expensive than we had before. */
2472 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2474 rtx lhs
= op0
, rhs
= op1
;
2476 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2477 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2479 if (GET_CODE (lhs
) == NEG
)
2481 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2482 lhs
= XEXP (lhs
, 0);
2484 else if (GET_CODE (lhs
) == MULT
2485 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2487 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2488 lhs
= XEXP (lhs
, 0);
2490 else if (GET_CODE (lhs
) == ASHIFT
2491 && CONST_INT_P (XEXP (lhs
, 1))
2492 && INTVAL (XEXP (lhs
, 1)) >= 0
2493 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2495 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2496 GET_MODE_PRECISION (int_mode
));
2497 lhs
= XEXP (lhs
, 0);
2500 if (GET_CODE (rhs
) == NEG
)
2502 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2503 rhs
= XEXP (rhs
, 0);
2505 else if (GET_CODE (rhs
) == MULT
2506 && CONST_INT_P (XEXP (rhs
, 1)))
2508 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2509 rhs
= XEXP (rhs
, 0);
2511 else if (GET_CODE (rhs
) == ASHIFT
2512 && CONST_INT_P (XEXP (rhs
, 1))
2513 && INTVAL (XEXP (rhs
, 1)) >= 0
2514 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2516 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2517 GET_MODE_PRECISION (int_mode
));
2518 negcoeff1
= -negcoeff1
;
2519 rhs
= XEXP (rhs
, 0);
2522 if (rtx_equal_p (lhs
, rhs
))
2524 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2526 bool speed
= optimize_function_for_speed_p (cfun
);
2528 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2530 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2531 return (set_src_cost (tem
, int_mode
, speed
)
2532 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2540 /* (-x - c) may be simplified as (-c - x). */
2541 if (GET_CODE (op0
) == NEG
2542 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2544 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2546 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2549 if ((GET_CODE (op0
) == CONST
2550 || GET_CODE (op0
) == SYMBOL_REF
2551 || GET_CODE (op0
) == LABEL_REF
)
2552 && poly_int_rtx_p (op1
, &offset
))
2553 return plus_constant (mode
, op0
, trunc_int_for_mode (-offset
, mode
));
2555 /* Don't let a relocatable value get a negative coeff. */
2556 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2557 return simplify_gen_binary (PLUS
, mode
,
2559 neg_const_int (mode
, op1
));
      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
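      /* Illustrative sketch (not part of the original source): every bit
	 set in (x & y) is also set in x, so the subtraction borrows
	 nothing and simply clears those bits, which is x & ~y.  A minimal
	 stand-alone check on 32-bit unsigned values:

	     #include <assert.h>
	     #include <stdint.h>

	     int
	     main (void)
	     {
	       const uint32_t xs[] = { 0u, 0xffu, 0x12345678u, 0xffffffffu };
	       const uint32_t ys[] = { 0u, 0x0f0fu, 0x87654321u, 0xffffffffu };
	       for (unsigned i = 0; i < 4; i++)
		 for (unsigned j = 0; j < 4; j++)
		   assert (xs[i] - (xs[i] & ys[j]) == (xs[i] & ~ys[j]));
	       return 0;
	     }
      */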
2578 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2579 by reversing the comparison code if valid. */
2580 if (STORE_FLAG_VALUE
== 1
2581 && trueop0
== const1_rtx
2582 && COMPARISON_P (op1
)
2583 && (reversed
= reversed_comparison (op1
, mode
)))
2586 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2587 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2588 && GET_CODE (op1
) == MULT
2589 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2593 in1
= XEXP (XEXP (op1
, 0), 0);
2594 in2
= XEXP (op1
, 1);
2595 return simplify_gen_binary (PLUS
, mode
,
2596 simplify_gen_binary (MULT
, mode
,
2601 /* Canonicalize (minus (neg A) (mult B C)) to
2602 (minus (mult (neg B) C) A). */
2603 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2604 && GET_CODE (op1
) == MULT
2605 && GET_CODE (op0
) == NEG
)
2609 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2610 in2
= XEXP (op1
, 1);
2611 return simplify_gen_binary (MINUS
, mode
,
2612 simplify_gen_binary (MULT
, mode
,
2617 /* If one of the operands is a PLUS or a MINUS, see if we can
2618 simplify this by the associative law. This will, for example,
2619 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2620 Don't use the associative law for floating point.
2621 The inaccuracy makes it nonassociative,
2622 and subtle programs can break if operations are associated. */
2624 if (INTEGRAL_MODE_P (mode
)
2625 && (plus_minus_operand_p (op0
)
2626 || plus_minus_operand_p (op1
))
2627 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2630 /* Handle vector series. */
2631 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2633 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2640 if (trueop1
== constm1_rtx
)
2641 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2643 if (GET_CODE (op0
) == NEG
)
2645 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
2651 && GET_CODE (op1
) == MULT
2652 && GET_CODE (temp
) == MULT
2653 && XEXP (op1
, 0) == XEXP (temp
, 0)
2654 && GET_CODE (XEXP (temp
, 1)) == NEG
2655 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2658 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2660 if (GET_CODE (op1
) == NEG
)
2662 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
2668 && GET_CODE (op0
) == MULT
2669 && GET_CODE (temp
) == MULT
2670 && XEXP (op0
, 0) == XEXP (temp
, 0)
2671 && GET_CODE (XEXP (temp
, 1)) == NEG
2672 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2675 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;
2688 /* In IEEE floating point, x*1 is not equivalent to x for
2690 if (!HONOR_SNANS (mode
)
2691 && trueop1
== CONST1_RTX (mode
))
      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0,
					gen_int_shift_amount (mode, val));
	}
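      /* Illustrative sketch (not part of the original source): multiplying
	 by 2**k in a wrapping mode is the same as shifting left by k,
	 which is what the rule above emits.  A minimal stand-alone check
	 on 32-bit unsigned values:

	     #include <assert.h>
	     #include <stdint.h>

	     int
	     main (void)
	     {
	       uint32_t x = 0x01234567u;
	       for (int k = 0; k < 32; k++)
		 assert (x * ((uint32_t) 1 << k) == x << k);
	       return 0;
	     }
      */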
2703 /* x*2 is x+x and x*(-1) is -x */
2704 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2705 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2706 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2707 && GET_MODE (op0
) == mode
)
2709 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
2711 if (real_equal (d1
, &dconst2
))
2712 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2714 if (!HONOR_SNANS (mode
)
2715 && real_equal (d1
, &dconstm1
))
2716 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2735 /* Reassociate multiplication, but for floating point MULTs
2736 only when the user specifies unsafe math optimizations. */
2737 if (! FLOAT_MODE_P (mode
)
2738 || flag_unsafe_math_optimizations
)
2740 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2747 if (trueop1
== CONST0_RTX (mode
))
2749 if (INTEGRAL_MODE_P (mode
)
2750 && trueop1
== CONSTM1_RTX (mode
)
2751 && !side_effects_p (op0
))
2753 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2755 /* A | (~A) -> -1 */
2756 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2757 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2758 && ! side_effects_p (op0
)
2759 && SCALAR_INT_MODE_P (mode
))
2762 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2763 if (CONST_INT_P (op1
)
2764 && HWI_COMPUTABLE_MODE_P (mode
)
2765 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2766 && !side_effects_p (op0
))
2769 /* Canonicalize (X & C1) | C2. */
2770 if (GET_CODE (op0
) == AND
2771 && CONST_INT_P (trueop1
)
2772 && CONST_INT_P (XEXP (op0
, 1)))
2774 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2775 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2776 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2778 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2780 && !side_effects_p (XEXP (op0
, 0)))
2783 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2784 if (((c1
|c2
) & mask
) == mask
)
2785 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2788 /* Convert (A & B) | A to A. */
2789 if (GET_CODE (op0
) == AND
2790 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2791 || rtx_equal_p (XEXP (op0
, 1), op1
))
2792 && ! side_effects_p (XEXP (op0
, 0))
2793 && ! side_effects_p (XEXP (op0
, 1)))
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_UNIT_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
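      /* Illustrative sketch (not part of the original source): when the two
	 shift counts sum to the operand precision, the OR of the shifted
	 copies is exactly a rotation, which is why the rule above can emit
	 a single ROTATE.  A minimal stand-alone check for 32-bit values:

	     #include <assert.h>
	     #include <stdint.h>

	     static uint32_t
	     rotl32 (uint32_t x, unsigned c)   // valid for 0 < c < 32
	     {
	       return (x << c) | (x >> (32 - c));
	     }

	     int
	     main (void)
	     {
	       assert (rotl32 (0xdeadbeefu, 8) == 0xadbeefdeu);
	       return 0;
	     }
      */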
2819 /* Same, but for ashift that has been "simplified" to a wider mode
2820 by simplify_shift_const. */
2822 if (GET_CODE (opleft
) == SUBREG
2823 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
2824 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (opleft
)),
2826 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2827 && GET_CODE (opright
) == LSHIFTRT
2828 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2829 && known_eq (SUBREG_BYTE (opleft
), SUBREG_BYTE (XEXP (opright
, 0)))
2830 && GET_MODE_SIZE (int_mode
) < GET_MODE_SIZE (inner_mode
)
2831 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2832 SUBREG_REG (XEXP (opright
, 0)))
2833 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2834 && CONST_INT_P (XEXP (opright
, 1))
2835 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1))
2836 + INTVAL (XEXP (opright
, 1))
2837 == GET_MODE_PRECISION (int_mode
)))
2838 return gen_rtx_ROTATE (int_mode
, XEXP (opright
, 0),
2839 XEXP (SUBREG_REG (opleft
), 1));
2841 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2842 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2843 the PLUS does not affect any of the bits in OP1: then we can do
2844 the IOR as a PLUS and we can associate. This is valid if OP1
2845 can be safely shifted left C bits. */
2846 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2847 && GET_CODE (XEXP (op0
, 0)) == PLUS
2848 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2849 && CONST_INT_P (XEXP (op0
, 1))
2850 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2852 int count
= INTVAL (XEXP (op0
, 1));
2853 HOST_WIDE_INT mask
= UINTVAL (trueop1
) << count
;
2855 if (mask
>> count
== INTVAL (trueop1
)
2856 && trunc_int_for_mode (mask
, mode
) == mask
2857 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2858 return simplify_gen_binary (ASHIFTRT
, mode
,
2859 plus_constant (mode
, XEXP (op0
, 0),
2864 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2868 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2874 if (trueop1
== CONST0_RTX (mode
))
2876 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2877 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2878 if (rtx_equal_p (trueop0
, trueop1
)
2879 && ! side_effects_p (op0
)
2880 && GET_MODE_CLASS (mode
) != MODE_CC
)
2881 return CONST0_RTX (mode
);
2883 /* Canonicalize XOR of the most significant bit to PLUS. */
2884 if (CONST_SCALAR_INT_P (op1
)
2885 && mode_signbit_p (mode
, op1
))
2886 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2887 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2888 if (CONST_SCALAR_INT_P (op1
)
2889 && GET_CODE (op0
) == PLUS
2890 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2891 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2892 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2893 simplify_gen_binary (XOR
, mode
, op1
,
      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
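      /* Illustrative sketch (not part of the original source): when the two
	 operands have no set bits in common there can be no cancellation,
	 so XOR, IOR and even PLUS all compute the same value.  A minimal
	 stand-alone check:

	     #include <assert.h>
	     #include <stdint.h>

	     int
	     main (void)
	     {
	       uint32_t a = 0xff00ff00u, b = 0x00aa0055u;
	       assert ((a & b) == 0);          // no bits in common
	       assert ((a ^ b) == (a | b));
	       assert (a + b == (a | b));
	       return 0;
	     }
      */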
2905 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2906 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2909 int num_negated
= 0;
2911 if (GET_CODE (op0
) == NOT
)
2912 num_negated
++, op0
= XEXP (op0
, 0);
2913 if (GET_CODE (op1
) == NOT
)
2914 num_negated
++, op1
= XEXP (op1
, 0);
2916 if (num_negated
== 2)
2917 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2918 else if (num_negated
== 1)
2919 return simplify_gen_unary (NOT
, mode
,
2920 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2924 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2925 correspond to a machine insn or result in further simplifications
2926 if B is a constant. */
2928 if (GET_CODE (op0
) == AND
2929 && rtx_equal_p (XEXP (op0
, 1), op1
)
2930 && ! side_effects_p (op1
))
2931 return simplify_gen_binary (AND
, mode
,
2932 simplify_gen_unary (NOT
, mode
,
2933 XEXP (op0
, 0), mode
),
2936 else if (GET_CODE (op0
) == AND
2937 && rtx_equal_p (XEXP (op0
, 0), op1
)
2938 && ! side_effects_p (op1
))
2939 return simplify_gen_binary (AND
, mode
,
2940 simplify_gen_unary (NOT
, mode
,
2941 XEXP (op0
, 1), mode
),
2944 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2945 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2946 out bits inverted twice and not set by C. Similarly, given
2947 (xor (and (xor A B) C) D), simplify without inverting C in
2948 the xor operand: (xor (and A C) (B&C)^D).
2950 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2951 && GET_CODE (XEXP (op0
, 0)) == XOR
2952 && CONST_INT_P (op1
)
2953 && CONST_INT_P (XEXP (op0
, 1))
2954 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2956 enum rtx_code op
= GET_CODE (op0
);
2957 rtx a
= XEXP (XEXP (op0
, 0), 0);
2958 rtx b
= XEXP (XEXP (op0
, 0), 1);
2959 rtx c
= XEXP (op0
, 1);
2961 HOST_WIDE_INT bval
= INTVAL (b
);
2962 HOST_WIDE_INT cval
= INTVAL (c
);
2963 HOST_WIDE_INT dval
= INTVAL (d
);
2964 HOST_WIDE_INT xcval
;
2971 return simplify_gen_binary (XOR
, mode
,
2972 simplify_gen_binary (op
, mode
, a
, c
),
2973 gen_int_mode ((bval
& xcval
) ^ dval
,
2977 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2978 we can transform like this:
2979 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2980 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2981 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2982 Attempt a few simplifications when B and C are both constants. */
2983 if (GET_CODE (op0
) == AND
2984 && CONST_INT_P (op1
)
2985 && CONST_INT_P (XEXP (op0
, 1)))
2987 rtx a
= XEXP (op0
, 0);
2988 rtx b
= XEXP (op0
, 1);
2990 HOST_WIDE_INT bval
= INTVAL (b
);
2991 HOST_WIDE_INT cval
= INTVAL (c
);
2993 /* Instead of computing ~A&C, we compute its negated value,
2994 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2995 optimize for sure. If it does not simplify, we still try
2996 to compute ~A&C below, but since that always allocates
2997 RTL, we don't try that before committing to returning a
2998 simplified expression. */
2999 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
3002 if ((~cval
& bval
) == 0)
3004 rtx na_c
= NULL_RTX
;
3006 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
3009 /* If ~A does not simplify, don't bother: we don't
3010 want to simplify 2 operations into 3, and if na_c
3011 were to simplify with na, n_na_c would have
3012 simplified as well. */
3013 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
3015 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
3018 /* Try to simplify ~A&C | ~B&C. */
3019 if (na_c
!= NULL_RTX
)
3020 return simplify_gen_binary (IOR
, mode
, na_c
,
3021 gen_int_mode (~bval
& cval
, mode
));
3025 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3026 if (n_na_c
== CONSTM1_RTX (mode
))
3028 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
3029 gen_int_mode (~cval
& bval
,
3031 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
3032 gen_int_mode (~bval
& cval
,
3038 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3039 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3040 machines, and also has shorter instruction path length. */
3041 if (GET_CODE (op0
) == AND
3042 && GET_CODE (XEXP (op0
, 0)) == XOR
3043 && CONST_INT_P (XEXP (op0
, 1))
3044 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), trueop1
))
3047 rtx b
= XEXP (XEXP (op0
, 0), 1);
3048 rtx c
= XEXP (op0
, 1);
3049 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3050 rtx a_nc
= simplify_gen_binary (AND
, mode
, a
, nc
);
3051 rtx bc
= simplify_gen_binary (AND
, mode
, b
, c
);
3052 return simplify_gen_binary (IOR
, mode
, a_nc
, bc
);
3054 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3055 else if (GET_CODE (op0
) == AND
3056 && GET_CODE (XEXP (op0
, 0)) == XOR
3057 && CONST_INT_P (XEXP (op0
, 1))
3058 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), trueop1
))
3060 rtx a
= XEXP (XEXP (op0
, 0), 0);
3062 rtx c
= XEXP (op0
, 1);
3063 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3064 rtx b_nc
= simplify_gen_binary (AND
, mode
, b
, nc
);
3065 rtx ac
= simplify_gen_binary (AND
, mode
, a
, c
);
3066 return simplify_gen_binary (IOR
, mode
, ac
, b_nc
);
3069 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3070 comparison if STORE_FLAG_VALUE is 1. */
3071 if (STORE_FLAG_VALUE
== 1
3072 && trueop1
== const1_rtx
3073 && COMPARISON_P (op0
)
3074 && (reversed
= reversed_comparison (op0
, mode
)))
3077 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3078 is (lt foo (const_int 0)), so we can perform the above
3079 simplification if STORE_FLAG_VALUE is 1. */
3081 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3082 && STORE_FLAG_VALUE
== 1
3083 && trueop1
== const1_rtx
3084 && GET_CODE (op0
) == LSHIFTRT
3085 && CONST_INT_P (XEXP (op0
, 1))
3086 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
3087 return gen_rtx_GE (int_mode
, XEXP (op0
, 0), const0_rtx
);
3089 /* (xor (comparison foo bar) (const_int sign-bit))
3090 when STORE_FLAG_VALUE is the sign bit. */
3091 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3092 && val_signbit_p (int_mode
, STORE_FLAG_VALUE
)
3093 && trueop1
== const_true_rtx
3094 && COMPARISON_P (op0
)
3095 && (reversed
= reversed_comparison (op0
, int_mode
)))
3098 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3102 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3108 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3110 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3112 if (HWI_COMPUTABLE_MODE_P (mode
))
3114 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3115 HOST_WIDE_INT nzop1
;
3116 if (CONST_INT_P (trueop1
))
3118 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3119 /* If we are turning off bits already known off in OP0, we need
3121 if ((nzop0
& ~val1
) == 0)
3124 nzop1
= nonzero_bits (trueop1
, mode
);
3125 /* If we are clearing all the nonzero bits, the result is zero. */
3126 if ((nzop1
& nzop0
) == 0
3127 && !side_effects_p (op0
) && !side_effects_p (op1
))
3128 return CONST0_RTX (mode
);
3130 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3131 && GET_MODE_CLASS (mode
) != MODE_CC
)
      /* (and X (not X)) is zero, as is (and (not X) X).  */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);
3140 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3141 there are no nonzero bits of C outside of X's mode. */
3142 if ((GET_CODE (op0
) == SIGN_EXTEND
3143 || GET_CODE (op0
) == ZERO_EXTEND
)
3144 && CONST_INT_P (trueop1
)
3145 && HWI_COMPUTABLE_MODE_P (mode
)
3146 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3147 & UINTVAL (trueop1
)) == 0)
3149 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3150 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3151 gen_int_mode (INTVAL (trueop1
),
3153 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3156 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3157 we might be able to further simplify the AND with X and potentially
3158 remove the truncation altogether. */
3159 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3161 rtx x
= XEXP (op0
, 0);
3162 machine_mode xmode
= GET_MODE (x
);
3163 tem
= simplify_gen_binary (AND
, xmode
, x
,
3164 gen_int_mode (INTVAL (trueop1
), xmode
));
3165 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}
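      /* Illustrative sketch (not part of the original source): this is just
	 distributivity of AND over OR, with the constant part C1 & C2
	 folded at compile time.  A minimal stand-alone check:

	     #include <assert.h>
	     #include <stdint.h>

	     int
	     main (void)
	     {
	       uint32_t a = 0x12345678u, c1 = 0x00ff00ffu, c2 = 0x0ff00ff0u;
	       assert (((a | c1) & c2) == ((a & c2) | (c1 & c2)));
	       return 0;
	     }
      */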
3180 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3181 insn (and may simplify more). */
3182 if (GET_CODE (op0
) == XOR
3183 && rtx_equal_p (XEXP (op0
, 0), op1
)
3184 && ! side_effects_p (op1
))
3185 return simplify_gen_binary (AND
, mode
,
3186 simplify_gen_unary (NOT
, mode
,
3187 XEXP (op0
, 1), mode
),
3190 if (GET_CODE (op0
) == XOR
3191 && rtx_equal_p (XEXP (op0
, 1), op1
)
3192 && ! side_effects_p (op1
))
3193 return simplify_gen_binary (AND
, mode
,
3194 simplify_gen_unary (NOT
, mode
,
3195 XEXP (op0
, 0), mode
),
3198 /* Similarly for (~(A ^ B)) & A. */
3199 if (GET_CODE (op0
) == NOT
3200 && GET_CODE (XEXP (op0
, 0)) == XOR
3201 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3202 && ! side_effects_p (op1
))
3203 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3205 if (GET_CODE (op0
) == NOT
3206 && GET_CODE (XEXP (op0
, 0)) == XOR
3207 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3208 && ! side_effects_p (op1
))
3209 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3211 /* Convert (A | B) & A to A. */
3212 if (GET_CODE (op0
) == IOR
3213 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3214 || rtx_equal_p (XEXP (op0
, 1), op1
))
3215 && ! side_effects_p (XEXP (op0
, 0))
3216 && ! side_effects_p (XEXP (op0
, 1)))
3219 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3220 ((A & N) + B) & M -> (A + B) & M
3221 Similarly if (N & M) == 0,
3222 ((A | N) + B) & M -> (A + B) & M
3223 and for - instead of + and/or ^ instead of |.
3224 Also, if (N & M) == 0, then
3225 (A +- N) & M -> A & M. */
3226 if (CONST_INT_P (trueop1
)
3227 && HWI_COMPUTABLE_MODE_P (mode
)
3228 && ~UINTVAL (trueop1
)
3229 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3230 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3235 pmop
[0] = XEXP (op0
, 0);
3236 pmop
[1] = XEXP (op0
, 1);
3238 if (CONST_INT_P (pmop
[1])
3239 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3240 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3242 for (which
= 0; which
< 2; which
++)
3245 switch (GET_CODE (tem
))
3248 if (CONST_INT_P (XEXP (tem
, 1))
3249 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3250 == UINTVAL (trueop1
))
3251 pmop
[which
] = XEXP (tem
, 0);
3255 if (CONST_INT_P (XEXP (tem
, 1))
3256 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3257 pmop
[which
] = XEXP (tem
, 0);
3264 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3266 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3268 return simplify_gen_binary (code
, mode
, tem
, op1
);
3272 /* (and X (ior (not X) Y) -> (and X Y) */
3273 if (GET_CODE (op1
) == IOR
3274 && GET_CODE (XEXP (op1
, 0)) == NOT
3275 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3276 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3278 /* (and (ior (not X) Y) X) -> (and X Y) */
3279 if (GET_CODE (op0
) == IOR
3280 && GET_CODE (XEXP (op0
, 0)) == NOT
3281 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3282 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3284 /* (and X (ior Y (not X)) -> (and X Y) */
3285 if (GET_CODE (op1
) == IOR
3286 && GET_CODE (XEXP (op1
, 1)) == NOT
3287 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3288 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3290 /* (and (ior Y (not X)) X) -> (and X Y) */
3291 if (GET_CODE (op0
) == IOR
3292 && GET_CODE (XEXP (op0
, 1)) == NOT
3293 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3294 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3296 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3300 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3306 /* 0/x is 0 (or x&0 if x has side-effects). */
3307 if (trueop0
== CONST0_RTX (mode
)
3308 && !cfun
->can_throw_non_call_exceptions
)
3310 if (side_effects_p (op1
))
3311 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3315 if (trueop1
== CONST1_RTX (mode
))
3317 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
	  /* Convert divide by power of two into shift.  */
	  if (CONST_INT_P (trueop1)
	      && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0,
					gen_int_shift_amount (mode, val));
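	  /* Illustrative sketch (not part of the original source): the rule
	     above emits a logical right shift, which matches unsigned
	     division by 2**k exactly (signed division would need a rounding
	     adjustment).  A minimal stand-alone check on 32-bit unsigned
	     values:

		 #include <assert.h>
		 #include <stdint.h>

		 int
		 main (void)
		 {
		   uint32_t x = 0xdeadbeefu;
		   for (int k = 1; k < 32; k++)
		     assert (x / ((uint32_t) 1 << k) == x >> k);
		   return 0;
		 }
	  */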
3329 /* Handle floating point and integers separately. */
3330 if (SCALAR_FLOAT_MODE_P (mode
))
3332 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3333 safe for modes with NaNs, since 0.0 / 0.0 will then be
3334 NaN rather than 0.0. Nor is it safe for modes with signed
3335 zeros, since dividing 0 by a negative number gives -0.0 */
3336 if (trueop0
== CONST0_RTX (mode
)
3337 && !HONOR_NANS (mode
)
3338 && !HONOR_SIGNED_ZEROS (mode
)
3339 && ! side_effects_p (op1
))
3342 if (trueop1
== CONST1_RTX (mode
)
3343 && !HONOR_SNANS (mode
))
3346 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3347 && trueop1
!= CONST0_RTX (mode
))
3349 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3352 if (real_equal (d1
, &dconstm1
)
3353 && !HONOR_SNANS (mode
))
3354 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3356 /* Change FP division by a constant into multiplication.
3357 Only do this with -freciprocal-math. */
3358 if (flag_reciprocal_math
3359 && !real_equal (d1
, &dconst0
))
3362 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3363 tem
= const_double_from_real_value (d
, mode
);
3364 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3368 else if (SCALAR_INT_MODE_P (mode
))
3370 /* 0/x is 0 (or x&0 if x has side-effects). */
3371 if (trueop0
== CONST0_RTX (mode
)
3372 && !cfun
->can_throw_non_call_exceptions
)
3374 if (side_effects_p (op1
))
3375 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3379 if (trueop1
== CONST1_RTX (mode
))
3381 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3386 if (trueop1
== constm1_rtx
)
3388 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3390 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3396 /* 0%x is 0 (or x&0 if x has side-effects). */
3397 if (trueop0
== CONST0_RTX (mode
))
3399 if (side_effects_p (op1
))
3400 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
	  /* x%1 is 0 (or x&0 if x has side-effects).  */
3404 if (trueop1
== CONST1_RTX (mode
))
3406 if (side_effects_p (op0
))
3407 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3408 return CONST0_RTX (mode
);
	  /* Implement modulus by power of two as AND.  */
	  if (CONST_INT_P (trueop1)
	      && exact_log2 (UINTVAL (trueop1)) > 0)
	    return simplify_gen_binary (AND, mode, op0,
					gen_int_mode (UINTVAL (trueop1) - 1,
						      mode));
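	  /* Illustrative sketch (not part of the original source): for an
	     unsigned value, the remainder modulo 2**k is just the low k
	     bits, i.e. an AND with the mask 2**k - 1, which is what the
	     rule above generates.  A minimal stand-alone check:

		 #include <assert.h>
		 #include <stdint.h>

		 int
		 main (void)
		 {
		   uint32_t x = 0xdeadbeefu;
		   for (int k = 1; k < 32; k++)
		     {
		       uint32_t m = ((uint32_t) 1 << k) - 1;
		       assert (x % ((uint32_t) 1 << k) == (x & m));
		     }
		   return 0;
		 }
	  */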
3419 /* 0%x is 0 (or x&0 if x has side-effects). */
3420 if (trueop0
== CONST0_RTX (mode
))
3422 if (side_effects_p (op1
))
3423 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3426 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3427 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3429 if (side_effects_p (op0
))
3430 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3431 return CONST0_RTX (mode
);
3437 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3438 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3439 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3441 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3442 if (CONST_INT_P (trueop1
)
3443 && IN_RANGE (INTVAL (trueop1
),
3444 GET_MODE_UNIT_PRECISION (mode
) / 2 + (code
== ROTATE
),
3445 GET_MODE_UNIT_PRECISION (mode
) - 1))
3447 int new_amount
= GET_MODE_UNIT_PRECISION (mode
) - INTVAL (trueop1
);
3448 rtx new_amount_rtx
= gen_int_shift_amount (mode
, new_amount
);
3449 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3450 mode
, op0
, new_amount_rtx
);
3455 if (trueop1
== CONST0_RTX (mode
))
3457 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3459 /* Rotating ~0 always results in ~0. */
3460 if (CONST_INT_P (trueop0
)
3461 && HWI_COMPUTABLE_MODE_P (mode
)
3462 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3463 && ! side_effects_p (op1
))
3469 scalar constants c1, c2
3470 size (M2) > size (M1)
3471 c1 == size (M2) - size (M1)
3473 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3477 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3479 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
3480 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3482 && CONST_INT_P (op1
)
3483 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3484 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
3486 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3487 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
3488 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3489 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
3490 && subreg_lowpart_p (op0
))
3492 rtx tmp
= gen_int_shift_amount
3493 (inner_mode
, INTVAL (XEXP (SUBREG_REG (op0
), 1)) + INTVAL (op1
));
3494 tmp
= simplify_gen_binary (code
, inner_mode
,
3495 XEXP (SUBREG_REG (op0
), 0),
3497 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
3500 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3502 val
= INTVAL (op1
) & (GET_MODE_UNIT_PRECISION (mode
) - 1);
3503 if (val
!= INTVAL (op1
))
3504 return simplify_gen_binary (code
, mode
, op0
,
3505 gen_int_shift_amount (mode
, val
));
3512 if (trueop1
== CONST0_RTX (mode
))
3514 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3516 goto canonicalize_shift
;
3519 if (trueop1
== CONST0_RTX (mode
))
3521 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3523 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3524 if (GET_CODE (op0
) == CLZ
3525 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op0
, 0)), &inner_mode
)
3526 && CONST_INT_P (trueop1
)
3527 && STORE_FLAG_VALUE
== 1
3528 && INTVAL (trueop1
) < GET_MODE_UNIT_PRECISION (mode
))
3530 unsigned HOST_WIDE_INT zero_val
= 0;
3532 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode
, zero_val
)
3533 && zero_val
== GET_MODE_PRECISION (inner_mode
)
3534 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3535 return simplify_gen_relational (EQ
, mode
, inner_mode
,
3536 XEXP (op0
, 0), const0_rtx
);
3538 goto canonicalize_shift
;
3541 if (HWI_COMPUTABLE_MODE_P (mode
)
3542 && mode_signbit_p (mode
, trueop1
)
3543 && ! side_effects_p (op0
))
3545 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3547 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3553 if (HWI_COMPUTABLE_MODE_P (mode
)
3554 && CONST_INT_P (trueop1
)
3555 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3556 && ! side_effects_p (op0
))
3558 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3560 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3566 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3568 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3570 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3576 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3578 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3580 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3593 /* ??? There are simplifications that can be done. */
3597 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
3598 return gen_vec_duplicate (mode
, op0
);
3599 if (valid_for_const_vector_p (mode
, op0
)
3600 && valid_for_const_vector_p (mode
, op1
))
3601 return gen_const_vec_series (mode
, op0
, op1
);
3605 if (!VECTOR_MODE_P (mode
))
3607 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3608 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3609 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3610 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3611 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3613 if (vec_duplicate_p (trueop0
, &elt0
))
3616 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3617 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3620 /* Extract a scalar element from a nested VEC_SELECT expression
3621 (with optional nested VEC_CONCAT expression). Some targets
3622 (i386) extract scalar element from a vector using chain of
3623 nested VEC_SELECT expressions. When input operand is a memory
3624 operand, this operation can be simplified to a simple scalar
3625 load from an offseted memory address. */
3627 if (GET_CODE (trueop0
) == VEC_SELECT
3628 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3629 .is_constant (&n_elts
)))
3631 rtx op0
= XEXP (trueop0
, 0);
3632 rtx op1
= XEXP (trueop0
, 1);
3634 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3640 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3641 gcc_assert (i
< n_elts
);
3643 /* Select element, pointed by nested selector. */
3644 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3646 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3647 if (GET_CODE (op0
) == VEC_CONCAT
)
3649 rtx op00
= XEXP (op0
, 0);
3650 rtx op01
= XEXP (op0
, 1);
3652 machine_mode mode00
, mode01
;
3653 int n_elts00
, n_elts01
;
3655 mode00
= GET_MODE (op00
);
3656 mode01
= GET_MODE (op01
);
3658 /* Find out the number of elements of each operand.
3659 Since the concatenated result has a constant number
3660 of elements, the operands must too. */
3661 n_elts00
= GET_MODE_NUNITS (mode00
).to_constant ();
3662 n_elts01
= GET_MODE_NUNITS (mode01
).to_constant ();
3664 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3666 /* Select correct operand of VEC_CONCAT
3667 and adjust selector. */
3668 if (elem
< n_elts01
)
3679 vec
= rtvec_alloc (1);
3680 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3682 tmp
= gen_rtx_fmt_ee (code
, mode
,
3683 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3689 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3690 gcc_assert (GET_MODE_INNER (mode
)
3691 == GET_MODE_INNER (GET_MODE (trueop0
)));
3692 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3694 if (vec_duplicate_p (trueop0
, &elt0
))
3695 /* It doesn't matter which elements are selected by trueop1,
3696 because they are all the same. */
3697 return gen_vec_duplicate (mode
, elt0
);
3699 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3701 unsigned n_elts
= XVECLEN (trueop1
, 0);
3702 rtvec v
= rtvec_alloc (n_elts
);
3705 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
3706 for (i
= 0; i
< n_elts
; i
++)
3708 rtx x
= XVECEXP (trueop1
, 0, i
);
3710 gcc_assert (CONST_INT_P (x
));
3711 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3715 return gen_rtx_CONST_VECTOR (mode
, v
);
3718 /* Recognize the identity. */
3719 if (GET_MODE (trueop0
) == mode
)
3721 bool maybe_ident
= true;
3722 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3724 rtx j
= XVECEXP (trueop1
, 0, i
);
3725 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3727 maybe_ident
= false;
3735 /* If we build {a,b} then permute it, build the result directly. */
3736 if (XVECLEN (trueop1
, 0) == 2
3737 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3738 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3739 && GET_CODE (trueop0
) == VEC_CONCAT
3740 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3741 && GET_MODE (XEXP (trueop0
, 0)) == mode
3742 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3743 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3745 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3746 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3749 gcc_assert (i0
< 4 && i1
< 4);
3750 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3751 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3753 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3756 if (XVECLEN (trueop1
, 0) == 2
3757 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3758 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3759 && GET_CODE (trueop0
) == VEC_CONCAT
3760 && GET_MODE (trueop0
) == mode
)
3762 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3763 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3766 gcc_assert (i0
< 2 && i1
< 2);
3767 subop0
= XEXP (trueop0
, i0
);
3768 subop1
= XEXP (trueop0
, i1
);
3770 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3773 /* If we select one half of a vec_concat, return that. */
3775 if (GET_CODE (trueop0
) == VEC_CONCAT
3776 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 0)))
3778 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0
, 1)))
3780 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3782 rtx subop0
= XEXP (trueop0
, 0);
3783 rtx subop1
= XEXP (trueop0
, 1);
3784 machine_mode mode0
= GET_MODE (subop0
);
3785 machine_mode mode1
= GET_MODE (subop1
);
3786 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3787 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3789 bool success
= true;
3790 for (int i
= 1; i
< l0
; ++i
)
3792 rtx j
= XVECEXP (trueop1
, 0, i
);
3793 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3802 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3804 bool success
= true;
3805 for (int i
= 1; i
< l1
; ++i
)
3807 rtx j
= XVECEXP (trueop1
, 0, i
);
3808 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3820 if (XVECLEN (trueop1
, 0) == 1
3821 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3822 && GET_CODE (trueop0
) == VEC_CONCAT
)
3825 offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3827 /* Try to find the element in the VEC_CONCAT. */
3828 while (GET_MODE (vec
) != mode
3829 && GET_CODE (vec
) == VEC_CONCAT
)
3831 poly_int64 vec_size
;
3833 if (CONST_INT_P (XEXP (vec
, 0)))
3835 /* vec_concat of two const_ints doesn't make sense with
3836 respect to modes. */
3837 if (CONST_INT_P (XEXP (vec
, 1)))
3840 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3841 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3844 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3846 if (known_lt (offset
, vec_size
))
3847 vec
= XEXP (vec
, 0);
3848 else if (known_ge (offset
, vec_size
))
3851 vec
= XEXP (vec
, 1);
3855 vec
= avoid_constant_pool_reference (vec
);
3858 if (GET_MODE (vec
) == mode
)
3862 /* If we select elements in a vec_merge that all come from the same
3863 operand, select from that operand directly. */
3864 if (GET_CODE (op0
) == VEC_MERGE
)
3866 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3867 if (CONST_INT_P (trueop02
))
3869 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3870 bool all_operand0
= true;
3871 bool all_operand1
= true;
3872 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3874 rtx j
= XVECEXP (trueop1
, 0, i
);
3875 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
3876 all_operand1
= false;
3878 all_operand0
= false;
3880 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3881 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3882 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3883 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3887 /* If we have two nested selects that are inverses of each
3888 other, replace them with the source operand. */
3889 if (GET_CODE (trueop0
) == VEC_SELECT
3890 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3892 rtx op0_subop1
= XEXP (trueop0
, 1);
3893 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3894 gcc_assert (known_eq (XVECLEN (trueop1
, 0), GET_MODE_NUNITS (mode
)));
3896 /* Apply the outer ordering vector to the inner one. (The inner
3897 ordering vector is expressly permitted to be of a different
3898 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3899 then the two VEC_SELECTs cancel. */
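      /* Illustrative example (not from the original source): with an inner
	 select (vec_select:V2SI X (parallel [1 0])) and an outer selector
	 (parallel [1 0]), composing gives inner[outer[i]] = { 0, 1 }, i.e.
	 the identity permutation, so the whole expression reduces to X.  */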
3900 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3902 rtx x
= XVECEXP (trueop1
, 0, i
);
3903 if (!CONST_INT_P (x
))
3905 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3906 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3909 return XEXP (trueop0
, 0);
3915 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3916 ? GET_MODE (trueop0
)
3917 : GET_MODE_INNER (mode
));
3918 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3919 ? GET_MODE (trueop1
)
3920 : GET_MODE_INNER (mode
));
3922 gcc_assert (VECTOR_MODE_P (mode
));
3923 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode
)
3924 + GET_MODE_SIZE (op1_mode
),
3925 GET_MODE_SIZE (mode
)));
3927 if (VECTOR_MODE_P (op0_mode
))
3928 gcc_assert (GET_MODE_INNER (mode
)
3929 == GET_MODE_INNER (op0_mode
));
3931 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3933 if (VECTOR_MODE_P (op1_mode
))
3934 gcc_assert (GET_MODE_INNER (mode
)
3935 == GET_MODE_INNER (op1_mode
));
3937 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3939 unsigned int n_elts
, in_n_elts
;
3940 if ((GET_CODE (trueop0
) == CONST_VECTOR
3941 || CONST_SCALAR_INT_P (trueop0
)
3942 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3943 && (GET_CODE (trueop1
) == CONST_VECTOR
3944 || CONST_SCALAR_INT_P (trueop1
)
3945 || CONST_DOUBLE_AS_FLOAT_P (trueop1
))
3946 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
)
3947 && GET_MODE_NUNITS (op0_mode
).is_constant (&in_n_elts
))
3949 rtvec v
= rtvec_alloc (n_elts
);
3951 for (i
= 0; i
< n_elts
; i
++)
3955 if (!VECTOR_MODE_P (op0_mode
))
3956 RTVEC_ELT (v
, i
) = trueop0
;
3958 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3962 if (!VECTOR_MODE_P (op1_mode
))
3963 RTVEC_ELT (v
, i
) = trueop1
;
3965 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3970 return gen_rtx_CONST_VECTOR (mode
, v
);
3973 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3974 Restrict the transformation to avoid generating a VEC_SELECT with a
3975 mode unrelated to its operand. */
3976 if (GET_CODE (trueop0
) == VEC_SELECT
3977 && GET_CODE (trueop1
) == VEC_SELECT
3978 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3979 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3981 rtx par0
= XEXP (trueop0
, 1);
3982 rtx par1
= XEXP (trueop1
, 1);
3983 int len0
= XVECLEN (par0
, 0);
3984 int len1
= XVECLEN (par1
, 0);
3985 rtvec vec
= rtvec_alloc (len0
+ len1
);
3986 for (int i
= 0; i
< len0
; i
++)
3987 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3988 for (int i
= 0; i
< len1
; i
++)
3989 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3990 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3991 gen_rtx_PARALLEL (VOIDmode
, vec
));
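      /* Illustrative example (not from the original source): the rewrite
	 just above turns
	   (vec_concat (vec_select X (parallel [0 1]))
		       (vec_select X (parallel [2 3])))
	 into (vec_select X (parallel [0 1 2 3])); the two PARALLELs are
	 simply concatenated because both selects read the same vector X.  */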
4000 if (mode
== GET_MODE (op0
)
4001 && mode
== GET_MODE (op1
)
4002 && vec_duplicate_p (op0
, &elt0
)
4003 && vec_duplicate_p (op1
, &elt1
))
      /* Try applying the operator to ELT and see if that simplifies.
	 We can duplicate the result if so.

	 The reason we don't use simplify_gen_binary is that it isn't
	 necessarily a win to convert things like:

	   (plus:V (vec_duplicate:V (reg:S R1))
		   (vec_duplicate:V (reg:S R2)))

	 to:

	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))

	 The first might be done entirely in vector registers while the
	 second might need a move between register files.  */
4020 tem
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4023 return gen_vec_duplicate (mode
, tem
);
4030 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
4033 if (VECTOR_MODE_P (mode
)
4034 && code
!= VEC_CONCAT
4035 && GET_CODE (op0
) == CONST_VECTOR
4036 && GET_CODE (op1
) == CONST_VECTOR
)
4038 unsigned int n_elts
;
4039 if (!CONST_VECTOR_NUNITS (op0
).is_constant (&n_elts
))
4042 gcc_assert (known_eq (n_elts
, CONST_VECTOR_NUNITS (op1
)));
4043 gcc_assert (known_eq (n_elts
, GET_MODE_NUNITS (mode
)));
4044 rtvec v
= rtvec_alloc (n_elts
);
4047 for (i
= 0; i
< n_elts
; i
++)
4049 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4050 CONST_VECTOR_ELT (op0
, i
),
4051 CONST_VECTOR_ELT (op1
, i
));
4052 if (!x
|| !valid_for_const_vector_p (mode
, x
))
4054 RTVEC_ELT (v
, i
) = x
;
4057 return gen_rtx_CONST_VECTOR (mode
, v
);
4060 if (VECTOR_MODE_P (mode
)
4061 && code
== VEC_CONCAT
4062 && (CONST_SCALAR_INT_P (op0
)
4063 || CONST_FIXED_P (op0
)
4064 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4065 && (CONST_SCALAR_INT_P (op1
)
4066 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4067 || CONST_FIXED_P (op1
)))
      /* Both inputs have a constant number of elements, so the result
	 must too.  */
4071 unsigned n_elts
= GET_MODE_NUNITS (mode
).to_constant ();
4072 rtvec v
= rtvec_alloc (n_elts
);
4074 gcc_assert (n_elts
>= 2);
4077 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4078 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4080 RTVEC_ELT (v
, 0) = op0
;
4081 RTVEC_ELT (v
, 1) = op1
;
4085 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
)).to_constant ();
4086 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
)).to_constant ();
4089 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4090 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4091 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4093 for (i
= 0; i
< op0_n_elts
; ++i
)
4094 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op0
, i
);
4095 for (i
= 0; i
< op1_n_elts
; ++i
)
4096 RTVEC_ELT (v
, op0_n_elts
+i
) = CONST_VECTOR_ELT (op1
, i
);
4099 return gen_rtx_CONST_VECTOR (mode
, v
);
4102 if (SCALAR_FLOAT_MODE_P (mode
)
4103 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4104 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4105 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4116 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4118 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4120 for (i
= 0; i
< 4; i
++)
4137 real_from_target (&r
, tmp0
, mode
);
4138 return const_double_from_real_value (r
, mode
);
4142 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4143 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4146 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4147 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4149 if (HONOR_SNANS (mode
)
4150 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4151 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4154 real_convert (&f0
, mode
, opr0
);
4155 real_convert (&f1
, mode
, opr1
);
4158 && real_equal (&f1
, &dconst0
)
4159 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4162 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4163 && flag_trapping_math
4164 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4166 int s0
= REAL_VALUE_NEGATIVE (f0
);
4167 int s1
= REAL_VALUE_NEGATIVE (f1
);
4172 /* Inf + -Inf = NaN plus exception. */
4177 /* Inf - Inf = NaN plus exception. */
4182 /* Inf / Inf = NaN plus exception. */
4189 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4190 && flag_trapping_math
4191 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4192 || (REAL_VALUE_ISINF (f1
)
4193 && real_equal (&f0
, &dconst0
))))
4194 /* Inf * 0 = NaN plus exception. */
4197 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4199 real_convert (&result
, mode
, &value
);
4201 /* Don't constant fold this floating point operation if
4202 the result has overflowed and flag_trapping_math. */
4204 if (flag_trapping_math
4205 && MODE_HAS_INFINITIES (mode
)
4206 && REAL_VALUE_ISINF (result
)
4207 && !REAL_VALUE_ISINF (f0
)
4208 && !REAL_VALUE_ISINF (f1
))
4209 /* Overflow plus exception. */
      /* Don't constant fold this floating point operation if the
	 result may depend upon the run-time rounding mode and
	 flag_rounding_math is set, or if GCC's software emulation
	 is unable to accurately represent the result.  */
4217 if ((flag_rounding_math
4218 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4219 && (inexact
|| !real_identical (&result
, &value
)))
4222 return const_double_from_real_value (result
, mode
);
4226 /* We can fold some multi-word operations. */
4227 scalar_int_mode int_mode
;
4228 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
4229 && CONST_SCALAR_INT_P (op0
)
4230 && CONST_SCALAR_INT_P (op1
))
4234 rtx_mode_t pop0
= rtx_mode_t (op0
, int_mode
);
4235 rtx_mode_t pop1
= rtx_mode_t (op1
, int_mode
);
4237 #if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE.  A lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if this check were folded into the test
	 above, the code would die later anyway.  If this assert
	 triggers, the port just needs to be taught to support wide int.  */
4244 gcc_assert (GET_MODE_PRECISION (int_mode
) <= HOST_BITS_PER_DOUBLE_INT
);
4249 result
= wi::sub (pop0
, pop1
);
4253 result
= wi::add (pop0
, pop1
);
4257 result
= wi::mul (pop0
, pop1
);
4261 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4267 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4273 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4279 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4285 result
= wi::bit_and (pop0
, pop1
);
4289 result
= wi::bit_or (pop0
, pop1
);
4293 result
= wi::bit_xor (pop0
, pop1
);
4297 result
= wi::smin (pop0
, pop1
);
4301 result
= wi::smax (pop0
, pop1
);
4305 result
= wi::umin (pop0
, pop1
);
4309 result
= wi::umax (pop0
, pop1
);
4316 wide_int wop1
= pop1
;
4317 if (SHIFT_COUNT_TRUNCATED
)
4318 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
4319 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
4325 result
= wi::lrshift (pop0
, wop1
);
4329 result
= wi::arshift (pop0
, wop1
);
4333 result
= wi::lshift (pop0
, wop1
);
4344 if (wi::neg_p (pop1
))
4350 result
= wi::lrotate (pop0
, pop1
);
4354 result
= wi::rrotate (pop0
, pop1
);
4365 return immed_wide_int_const (result
, int_mode
);
4368 /* Handle polynomial integers. */
4369 if (NUM_POLY_INT_COEFFS
> 1
4370 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4371 && poly_int_rtx_p (op0
)
4372 && poly_int_rtx_p (op1
))
4374 poly_wide_int result
;
4378 result
= wi::to_poly_wide (op0
, mode
) + wi::to_poly_wide (op1
, mode
);
4382 result
= wi::to_poly_wide (op0
, mode
) - wi::to_poly_wide (op1
, mode
);
4386 if (CONST_SCALAR_INT_P (op1
))
4387 result
= wi::to_poly_wide (op0
, mode
) * rtx_mode_t (op1
, mode
);
4393 if (CONST_SCALAR_INT_P (op1
))
4395 wide_int shift
= rtx_mode_t (op1
, mode
);
4396 if (SHIFT_COUNT_TRUNCATED
)
4397 shift
= wi::umod_trunc (shift
, GET_MODE_PRECISION (int_mode
));
4398 else if (wi::geu_p (shift
, GET_MODE_PRECISION (int_mode
)))
4400 result
= wi::to_poly_wide (op0
, mode
) << shift
;
4407 if (!CONST_SCALAR_INT_P (op1
)
4408 || !can_ior_p (wi::to_poly_wide (op0
, mode
),
4409 rtx_mode_t (op1
, mode
), &result
))
4416 return immed_wide_int_const (result
, int_mode
);
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */
4454 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4457 struct simplify_plus_minus_op_data
4464 int changed
, n_constants
, canonicalized
= 0;
4467 memset (ops
, 0, sizeof ops
);
4469 /* Set up the two operands and then expand them until nothing has been
4470 changed. If we run out of room in our array, give up; this should
4471 almost never happen. */
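  /* Illustrative note (not from the original source): each leaf operand
     lands in ops[] together with a negation flag.  For example,
     (minus (plus A B) (minus C D)) expands to { A:+, B:+, C:-, D:+ };
     after sorting and pairwise simplification the expression is rebuilt
     as a chain of PLUS/MINUS operations.  */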
4476 ops
[1].neg
= (code
== MINUS
);
4483 for (i
= 0; i
< n_ops
; i
++)
4485 rtx this_op
= ops
[i
].op
;
4486 int this_neg
= ops
[i
].neg
;
4487 enum rtx_code this_code
= GET_CODE (this_op
);
4493 if (n_ops
== ARRAY_SIZE (ops
))
4496 ops
[n_ops
].op
= XEXP (this_op
, 1);
4497 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4500 ops
[i
].op
= XEXP (this_op
, 0);
4502 /* If this operand was negated then we will potentially
4503 canonicalize the expression. Similarly if we don't
4504 place the operands adjacent we're re-ordering the
4505 expression and thus might be performing a
4506 canonicalization. Ignore register re-ordering.
4507 ??? It might be better to shuffle the ops array here,
4508 but then (plus (plus (A, B), plus (C, D))) wouldn't
4509 be seen as non-canonical. */
4512 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
4517 ops
[i
].op
= XEXP (this_op
, 0);
4518 ops
[i
].neg
= ! this_neg
;
4524 if (n_ops
!= ARRAY_SIZE (ops
)
4525 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4526 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4527 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4529 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4530 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4531 ops
[n_ops
].neg
= this_neg
;
4539 /* ~a -> (-a - 1) */
4540 if (n_ops
!= ARRAY_SIZE (ops
))
4542 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4543 ops
[n_ops
++].neg
= this_neg
;
4544 ops
[i
].op
= XEXP (this_op
, 0);
4545 ops
[i
].neg
= !this_neg
;
4555 ops
[i
].op
= neg_const_int (mode
, this_op
);
4569 if (n_constants
> 1)
4572 gcc_assert (n_ops
>= 2);
4574 /* If we only have two operands, we can avoid the loops. */
4577 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4580 /* Get the two operands. Be careful with the order, especially for
4581 the cases where code == MINUS. */
4582 if (ops
[0].neg
&& ops
[1].neg
)
4584 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4587 else if (ops
[0].neg
)
4598 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4601 /* Now simplify each pair of operands until nothing changes. */
4604 /* Insertion sort is good enough for a small array. */
4605 for (i
= 1; i
< n_ops
; i
++)
4607 struct simplify_plus_minus_op_data save
;
4611 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
4614 /* Just swapping registers doesn't count as canonicalization. */
4620 ops
[j
+ 1] = ops
[j
];
4622 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
4627 for (i
= n_ops
- 1; i
> 0; i
--)
4628 for (j
= i
- 1; j
>= 0; j
--)
4630 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4631 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4633 if (lhs
!= 0 && rhs
!= 0)
4635 enum rtx_code ncode
= PLUS
;
4641 std::swap (lhs
, rhs
);
4643 else if (swap_commutative_operands_p (lhs
, rhs
))
4644 std::swap (lhs
, rhs
);
4646 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4647 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4649 rtx tem_lhs
, tem_rhs
;
4651 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4652 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4653 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
4656 if (tem
&& !CONSTANT_P (tem
))
4657 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4660 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4664 /* Reject "simplifications" that just wrap the two
4665 arguments in a CONST. Failure to do so can result
4666 in infinite recursion with simplify_binary_operation
4667 when it calls us to simplify CONST operations.
4668 Also, if we find such a simplification, don't try
4669 any more combinations with this rhs: We must have
4670 something like symbol+offset, ie. one of the
4671 trivial CONST expressions we handle later. */
4672 if (GET_CODE (tem
) == CONST
4673 && GET_CODE (XEXP (tem
, 0)) == ncode
4674 && XEXP (XEXP (tem
, 0), 0) == lhs
4675 && XEXP (XEXP (tem
, 0), 1) == rhs
)
4678 if (GET_CODE (tem
) == NEG
)
4679 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4680 if (CONST_INT_P (tem
) && lneg
)
4681 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4685 ops
[j
].op
= NULL_RTX
;
4695 /* Pack all the operands to the lower-numbered entries. */
4696 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4705 /* If nothing changed, check that rematerialization of rtl instructions
4706 is still required. */
4709 /* Perform rematerialization if only all operands are registers and
4710 all operations are PLUS. */
4711 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4712 around rs6000 and how it uses the CA register. See PR67145. */
4713 for (i
= 0; i
< n_ops
; i
++)
4715 || !REG_P (ops
[i
].op
)
4716 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
4717 && fixed_regs
[REGNO (ops
[i
].op
)]
4718 && !global_regs
[REGNO (ops
[i
].op
)]
4719 && ops
[i
].op
!= frame_pointer_rtx
4720 && ops
[i
].op
!= arg_pointer_rtx
4721 && ops
[i
].op
!= stack_pointer_rtx
))
4726 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4728 && CONST_INT_P (ops
[1].op
)
4729 && CONSTANT_P (ops
[0].op
)
4731 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4733 /* We suppressed creation of trivial CONST expressions in the
4734 combination loop to avoid recursion. Create one manually now.
4735 The combination loop should have ensured that there is exactly
4736 one CONST_INT, and the sort will have ensured that it is last
4737 in the array and that any other constant will be next-to-last. */
4740 && CONST_INT_P (ops
[n_ops
- 1].op
)
4741 && CONSTANT_P (ops
[n_ops
- 2].op
))
4743 rtx value
= ops
[n_ops
- 1].op
;
4744 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4745 value
= neg_const_int (mode
, value
);
4746 if (CONST_INT_P (value
))
4748 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4754 /* Put a non-negated operand first, if possible. */
4756 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4759 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4768 /* Now make the result by performing the requested operations. */
4771 for (i
= 1; i
< n_ops
; i
++)
4772 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4773 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands must
   not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
4799 simplify_relational_operation (enum rtx_code code
, machine_mode mode
,
4800 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4802 rtx tem
, trueop0
, trueop1
;
4804 if (cmp_mode
== VOIDmode
)
4805 cmp_mode
= GET_MODE (op0
);
4806 if (cmp_mode
== VOIDmode
)
4807 cmp_mode
= GET_MODE (op1
);
4809 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4812 if (SCALAR_FLOAT_MODE_P (mode
))
4814 if (tem
== const0_rtx
)
4815 return CONST0_RTX (mode
);
4816 #ifdef FLOAT_STORE_FLAG_VALUE
4818 REAL_VALUE_TYPE val
;
4819 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4820 return const_double_from_real_value (val
, mode
);
4826 if (VECTOR_MODE_P (mode
))
4828 if (tem
== const0_rtx
)
4829 return CONST0_RTX (mode
);
4830 #ifdef VECTOR_STORE_FLAG_VALUE
4832 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4833 if (val
== NULL_RTX
)
4835 if (val
== const1_rtx
)
4836 return CONST1_RTX (mode
);
4838 return gen_const_vec_duplicate (mode
, val
);
4848 /* For the following tests, ensure const0_rtx is op1. */
4849 if (swap_commutative_operands_p (op0
, op1
)
4850 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4851 std::swap (op0
, op1
), code
= swap_condition (code
);
4853 /* If op0 is a compare, extract the comparison arguments from it. */
4854 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4855 return simplify_gen_relational (code
, mode
, VOIDmode
,
4856 XEXP (op0
, 0), XEXP (op0
, 1));
4858 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4862 trueop0
= avoid_constant_pool_reference (op0
);
4863 trueop1
= avoid_constant_pool_reference (op1
);
4864 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
4875 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
4876 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4878 enum rtx_code op0code
= GET_CODE (op0
);
4880 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4882 /* If op0 is a comparison, extract the comparison arguments
4886 if (GET_MODE (op0
) == mode
)
4887 return simplify_rtx (op0
);
4889 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4890 XEXP (op0
, 0), XEXP (op0
, 1));
4892 else if (code
== EQ
)
4894 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
4895 if (new_code
!= UNKNOWN
)
4896 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4897 XEXP (op0
, 0), XEXP (op0
, 1));
4901 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4902 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
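  /* Illustrative example (not from the original source): the unsigned
     overflow-check idiom (ltu (plus x 7) 7) tests whether X + 7 wrapped
     around, which is equivalent to (geu x (neg (const_int 7))), i.e.
     X >= -7 in the unsigned sense; that is exactly the rewrite performed
     below.  */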
4903 if ((code
== LTU
|| code
== GEU
)
4904 && GET_CODE (op0
) == PLUS
4905 && CONST_INT_P (XEXP (op0
, 1))
4906 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4907 || rtx_equal_p (op1
, XEXP (op0
, 1)))
4908 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4909 && XEXP (op0
, 1) != const0_rtx
)
4912 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4913 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4914 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4917 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4918 transformed into (LTU a -C). */
4919 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
4920 && CONST_INT_P (XEXP (op0
, 1))
4921 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
4922 && XEXP (op0
, 1) != const0_rtx
)
4925 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4926 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
4927 XEXP (op0
, 0), new_cmp
);
4930 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4931 if ((code
== LTU
|| code
== GEU
)
4932 && GET_CODE (op0
) == PLUS
4933 && rtx_equal_p (op1
, XEXP (op0
, 1))
4934 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4935 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4936 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4937 copy_rtx (XEXP (op0
, 0)));
4939 if (op1
== const0_rtx
)
4941 /* Canonicalize (GTU x 0) as (NE x 0). */
4943 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4944 /* Canonicalize (LEU x 0) as (EQ x 0). */
4946 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4948 else if (op1
== const1_rtx
)
4953 /* Canonicalize (GE x 1) as (GT x 0). */
4954 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4957 /* Canonicalize (GEU x 1) as (NE x 0). */
4958 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4961 /* Canonicalize (LT x 1) as (LE x 0). */
4962 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4965 /* Canonicalize (LTU x 1) as (EQ x 0). */
4966 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4972 else if (op1
== constm1_rtx
)
4974 /* Canonicalize (LE x -1) as (LT x 0). */
4976 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4977 /* Canonicalize (GT x -1) as (GE x 0). */
4979 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4982 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4983 if ((code
== EQ
|| code
== NE
)
4984 && (op0code
== PLUS
|| op0code
== MINUS
)
4986 && CONSTANT_P (XEXP (op0
, 1))
4987 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4989 rtx x
= XEXP (op0
, 0);
4990 rtx c
= XEXP (op0
, 1);
4991 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4992 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
4994 /* Detect an infinite recursive condition, where we oscillate at this
4995 simplification case between:
4996 A + B == C <---> C - B == A,
4997 where A, B, and C are all constants with non-simplifiable expressions,
4998 usually SYMBOL_REFs. */
4999 if (GET_CODE (tem
) == invcode
5001 && rtx_equal_p (c
, XEXP (tem
, 1)))
5004 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
5007 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
5008 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5009 scalar_int_mode int_mode
, int_cmp_mode
;
5011 && op1
== const0_rtx
5012 && is_int_mode (mode
, &int_mode
)
5013 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
5014 /* ??? Work-around BImode bugs in the ia64 backend. */
5015 && int_mode
!= BImode
5016 && int_cmp_mode
!= BImode
5017 && nonzero_bits (op0
, int_cmp_mode
) == 1
5018 && STORE_FLAG_VALUE
== 1)
5019 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
5020 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
5021 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
5023 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5024 if ((code
== EQ
|| code
== NE
)
5025 && op1
== const0_rtx
5027 return simplify_gen_relational (code
, mode
, cmp_mode
,
5028 XEXP (op0
, 0), XEXP (op0
, 1));
5030 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5031 if ((code
== EQ
|| code
== NE
)
5033 && rtx_equal_p (XEXP (op0
, 0), op1
)
5034 && !side_effects_p (XEXP (op0
, 0)))
5035 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
5038 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5039 if ((code
== EQ
|| code
== NE
)
5041 && rtx_equal_p (XEXP (op0
, 1), op1
)
5042 && !side_effects_p (XEXP (op0
, 1)))
5043 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5046 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5047 if ((code
== EQ
|| code
== NE
)
5049 && CONST_SCALAR_INT_P (op1
)
5050 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
5051 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5052 simplify_gen_binary (XOR
, cmp_mode
,
5053 XEXP (op0
, 1), op1
));
5055 /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5056 constant folding if x/y is a constant. */
5057 if ((code
== EQ
|| code
== NE
)
5058 && (op0code
== AND
|| op0code
== IOR
)
5059 && !side_effects_p (op1
)
5060 && op1
!= CONST0_RTX (cmp_mode
))
5062 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5063 (eq/ne (and (not y) x) 0). */
5064 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 0), op1
))
5065 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 1), op1
)))
5067 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1),
5069 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
5071 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5072 CONST0_RTX (cmp_mode
));
5075 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5076 (eq/ne (and (not x) y) 0). */
5077 if ((op0code
== AND
&& rtx_equal_p (XEXP (op0
, 1), op1
))
5078 || (op0code
== IOR
&& rtx_equal_p (XEXP (op0
, 0), op1
)))
5080 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0),
5082 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
5084 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5085 CONST0_RTX (cmp_mode
));
5089 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5090 if ((code
== EQ
|| code
== NE
)
5091 && GET_CODE (op0
) == BSWAP
5092 && CONST_SCALAR_INT_P (op1
))
5093 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5094 simplify_gen_unary (BSWAP
, cmp_mode
,
5097 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5098 if ((code
== EQ
|| code
== NE
)
5099 && GET_CODE (op0
) == BSWAP
5100 && GET_CODE (op1
) == BSWAP
)
5101 return simplify_gen_relational (code
, mode
, cmp_mode
,
5102 XEXP (op0
, 0), XEXP (op1
, 0));
5104 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
5110 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5111 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
5112 XEXP (op0
, 0), const0_rtx
);
5117 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5118 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
5119 XEXP (op0
, 0), const0_rtx
);
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
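/* Usage sketch (not from the original source): the CMP_* bits describe
   what is known about the operand pair.  For two known integers with
   op0 < op1 both signedly and unsignedly, the caller below passes
   CMP_LT | CMP_LTU; comparison_result (LEU, CMP_LT | CMP_LTU) then yields
   const_true_rtx, while comparison_result (GTU, CMP_LT | CMP_LTU) yields
   const0_rtx.  */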
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */
5196 simplify_const_relational_operation (enum rtx_code code
,
5204 gcc_assert (mode
!= VOIDmode
5205 || (GET_MODE (op0
) == VOIDmode
5206 && GET_MODE (op1
) == VOIDmode
));
5208 /* If op0 is a compare, extract the comparison arguments from it. */
5209 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5211 op1
= XEXP (op0
, 1);
5212 op0
= XEXP (op0
, 0);
5214 if (GET_MODE (op0
) != VOIDmode
)
5215 mode
= GET_MODE (op0
);
5216 else if (GET_MODE (op1
) != VOIDmode
)
5217 mode
= GET_MODE (op1
);
5222 /* We can't simplify MODE_CC values since we don't know what the
5223 actual comparison is. */
5224 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
5227 /* Make sure the constant is second. */
5228 if (swap_commutative_operands_p (op0
, op1
))
5230 std::swap (op0
, op1
);
5231 code
= swap_condition (code
);
5234 trueop0
= avoid_constant_pool_reference (op0
);
5235 trueop1
= avoid_constant_pool_reference (op1
);
5237 /* For integer comparisons of A and B maybe we can simplify A - B and can
5238 then simplify a comparison of that with zero. If A and B are both either
5239 a register or a CONST_INT, this can't help; testing for these cases will
5240 prevent infinite recursion here and speed things up.
5242 We can only do this for EQ and NE comparisons as otherwise we may
5243 lose or introduce overflow which we cannot disregard as undefined as
5244 we do not know the signedness of the operation on either the left or
5245 the right hand side of the comparison. */
5247 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5248 && (code
== EQ
|| code
== NE
)
5249 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5250 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5251 && (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
)) != 0
5252 /* We cannot do this if tem is a nonzero address. */
5253 && ! nonzero_address_p (tem
))
5254 return simplify_const_relational_operation (signed_condition (code
),
5255 mode
, tem
, const0_rtx
);
5257 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5258 return const_true_rtx
;
5260 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
5263 /* For modes without NaNs, if the two operands are equal, we know the
5264 result except if they have side-effects. Even with NaNs we know
5265 the result of unordered comparisons and, if signaling NaNs are
5266 irrelevant, also the result of LT/GT/LTGT. */
5267 if ((! HONOR_NANS (trueop0
)
5268 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5269 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5270 && ! HONOR_SNANS (trueop0
)))
5271 && rtx_equal_p (trueop0
, trueop1
)
5272 && ! side_effects_p (trueop0
))
5273 return comparison_result (code
, CMP_EQ
);
5275 /* If the operands are floating-point constants, see if we can fold
5277 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5278 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5279 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5281 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5282 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5284 /* Comparisons are unordered iff at least one of the values is NaN. */
5285 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
5295 return const_true_rtx
;
5308 return comparison_result (code
,
5309 (real_equal (d0
, d1
) ? CMP_EQ
:
5310 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
5313 /* Otherwise, see if the operands are both integers. */
5314 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
5315 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
5320 machine_mode cmode
= (mode
== VOIDmode
) ? MAX_MODE_INT
: mode
;
5321 rtx_mode_t ptrueop0
= rtx_mode_t (trueop0
, cmode
);
5322 rtx_mode_t ptrueop1
= rtx_mode_t (trueop1
, cmode
);
5324 if (wi::eq_p (ptrueop0
, ptrueop1
))
5325 return comparison_result (code
, CMP_EQ
);
5328 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
5329 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
5330 return comparison_result (code
, cr
);
5334 /* Optimize comparisons with upper and lower bounds. */
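  /* Illustrative note (not from the original source): if nonzero_bits or
     num_sign_bit_copies proves that TRUEOP0 fits in a narrower range,
     MMIN/MMAX shrink accordingly.  For example, when an SImode value is
     known to be sign-extended from 8 bits, the range becomes [-128, 127],
     so (gt x (const_int 127)) folds to const0_rtx and
     (le x (const_int 127)) folds to const_true_rtx.  */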
5335 scalar_int_mode int_mode
;
5336 if (CONST_INT_P (trueop1
)
5337 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5338 && HWI_COMPUTABLE_MODE_P (int_mode
)
5339 && !side_effects_p (trueop0
))
5342 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
5343 HOST_WIDE_INT val
= INTVAL (trueop1
);
5344 HOST_WIDE_INT mmin
, mmax
;
5354 /* Get a reduced range if the sign bit is zero. */
5355 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
5362 rtx mmin_rtx
, mmax_rtx
;
5363 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
5365 mmin
= INTVAL (mmin_rtx
);
5366 mmax
= INTVAL (mmax_rtx
);
5369 unsigned int sign_copies
5370 = num_sign_bit_copies (trueop0
, int_mode
);
5372 mmin
>>= (sign_copies
- 1);
5373 mmax
>>= (sign_copies
- 1);
5379 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5381 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5382 return const_true_rtx
;
5383 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5388 return const_true_rtx
;
5393 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5395 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5396 return const_true_rtx
;
5397 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5402 return const_true_rtx
;
5408 /* x == y is always false for y out of range. */
5409 if (val
< mmin
|| val
> mmax
)
5413 /* x > y is always false for y >= mmax, always true for y < mmin. */
5415 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5417 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5418 return const_true_rtx
;
5424 return const_true_rtx
;
5427 /* x < y is always false for y <= mmin, always true for y > mmax. */
5429 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5431 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5432 return const_true_rtx
;
5438 return const_true_rtx
;
5442 /* x != y is always true for y out of range. */
5443 if (val
< mmin
|| val
> mmax
)
5444 return const_true_rtx
;
5452 /* Optimize integer comparisons with zero. */
5453 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
5454 && trueop1
== const0_rtx
5455 && !side_effects_p (trueop0
))
5457 /* Some addresses are known to be nonzero. We don't know
5458 their sign, but equality comparisons are known. */
5459 if (nonzero_address_p (trueop0
))
5461 if (code
== EQ
|| code
== LEU
)
5463 if (code
== NE
|| code
== GTU
)
5464 return const_true_rtx
;
5467 /* See if the first operand is an IOR with a constant. If so, we
5468 may be able to determine the result of this comparison. */
5469 if (GET_CODE (op0
) == IOR
)
5471 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5472 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5474 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
5475 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5476 && (UINTVAL (inner_const
)
5487 return const_true_rtx
;
5491 return const_true_rtx
;
5505 /* Optimize comparison of ABS with zero. */
5506 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
5507 && (GET_CODE (trueop0
) == ABS
5508 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5509 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5514 /* Optimize abs(x) < 0.0. */
5515 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
5520 /* Optimize abs(x) >= 0.0. */
5521 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
5522 return const_true_rtx
;
5526 /* Optimize ! (abs(x) < 0.0). */
5527 return const_true_rtx
;
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the
   expression can be simplified to that, or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */

static rtx
simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
{
  if (cmp_code != EQ && cmp_code != NE)
    return NULL_RTX;

  /* Result on X == 0 and X != 0 respectively.  */
  rtx on_zero, on_nonzero;
  if (cmp_code == EQ)
    {
      on_zero = true_val;
      on_nonzero = false_val;
    }
  else
    {
      on_zero = false_val;
      on_nonzero = true_val;
    }

  rtx_code op_code = GET_CODE (on_nonzero);
  if ((op_code != CLZ && op_code != CTZ)
      || !rtx_equal_p (XEXP (on_nonzero, 0), x)
      || !CONST_INT_P (on_zero))
    return NULL_RTX;

  HOST_WIDE_INT op_val;
  scalar_int_mode mode ATTRIBUTE_UNUSED
    = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
  if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
       || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
      && op_val == INTVAL (on_zero))
    return on_nonzero;

  return NULL_RTX;
}
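/* Usage sketch (not from the original source): on a target where
   CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode, the RTL for
   "x == 0 ? 32 : __builtin_clz (x)", i.e.
     (if_then_else (eq x (const_int 0)) (const_int 32) (clz x))
   is collapsed by the IF_THEN_ELSE handling further below to
   plain (clz:SI x).  */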
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
5586 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5587 machine_mode op0_mode
, rtx op0
, rtx op1
,
5590 bool any_change
= false;
5592 scalar_int_mode int_mode
, int_op0_mode
;
5593 unsigned int n_elts
;
5598 /* Simplify negations around the multiplication. */
5599 /* -a * -b + c => a * b + c. */
5600 if (GET_CODE (op0
) == NEG
)
5602 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5604 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5606 else if (GET_CODE (op1
) == NEG
)
5608 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5610 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5613 /* Canonicalize the two multiplication operands. */
5614 /* a * -b + c => -b * a + c. */
5615 if (swap_commutative_operands_p (op0
, op1
))
5616 std::swap (op0
, op1
), any_change
= true;
5619 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5624 if (CONST_INT_P (op0
)
5625 && CONST_INT_P (op1
)
5626 && CONST_INT_P (op2
)
5627 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5628 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
5629 && HWI_COMPUTABLE_MODE_P (int_mode
))
5631 /* Extracting a bit-field from a constant */
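	  /* Illustrative example (not from the original source), assuming
	     !BITS_BIG_ENDIAN: (zero_extract (const_int 0x1234) (const_int 4)
	     (const_int 4)) shifts the value right by the bit position (4)
	     and masks with (1 << 4) - 1, yielding (const_int 3).
	     SIGN_EXTRACT additionally propagates the top bit of the
	     extracted field.  */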
5632 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5633 HOST_WIDE_INT op1val
= INTVAL (op1
);
5634 HOST_WIDE_INT op2val
= INTVAL (op2
);
5635 if (!BITS_BIG_ENDIAN
)
5637 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
5638 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
5640 /* Not enough information to calculate the bit position. */
5643 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5645 /* First zero-extend. */
5646 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
5647 /* If desired, propagate sign bit. */
5648 if (code
== SIGN_EXTRACT
5649 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
5651 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
5654 return gen_int_mode (val
, int_mode
);
5659 if (CONST_INT_P (op0
))
5660 return op0
!= const0_rtx
? op1
: op2
;
5662 /* Convert c ? a : a into "a". */
5663 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5666 /* Convert a != b ? a : b into "a". */
5667 if (GET_CODE (op0
) == NE
5668 && ! side_effects_p (op0
)
5669 && ! HONOR_NANS (mode
)
5670 && ! HONOR_SIGNED_ZEROS (mode
)
5671 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5672 && rtx_equal_p (XEXP (op0
, 1), op2
))
5673 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5674 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5677 /* Convert a == b ? a : b into "b". */
5678 if (GET_CODE (op0
) == EQ
5679 && ! side_effects_p (op0
)
5680 && ! HONOR_NANS (mode
)
5681 && ! HONOR_SIGNED_ZEROS (mode
)
5682 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5683 && rtx_equal_p (XEXP (op0
, 1), op2
))
5684 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5685 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5688 /* Convert (!c) != {0,...,0} ? a : b into
5689 c != {0,...,0} ? b : a for vector modes. */
5690 if (VECTOR_MODE_P (GET_MODE (op1
))
5691 && GET_CODE (op0
) == NE
5692 && GET_CODE (XEXP (op0
, 0)) == NOT
5693 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
5695 rtx cv
= XEXP (op0
, 1);
5698 if (!CONST_VECTOR_NUNITS (cv
).is_constant (&nunits
))
5701 for (int i
= 0; i
< nunits
; ++i
)
5702 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
5709 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
5710 XEXP (XEXP (op0
, 0), 0),
5712 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
5717 /* Convert x == 0 ? N : clz (x) into clz (x) when
5718 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5719 Similarly for ctz (x). */
5720 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
5721 && XEXP (op0
, 1) == const0_rtx
)
5724 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
5730 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5732 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5733 ? GET_MODE (XEXP (op0
, 1))
5734 : GET_MODE (XEXP (op0
, 0)));
5737 /* Look for happy constants in op1 and op2. */
5738 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5740 HOST_WIDE_INT t
= INTVAL (op1
);
5741 HOST_WIDE_INT f
= INTVAL (op2
);
5743 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5744 code
= GET_CODE (op0
);
5745 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5748 tmp
= reversed_comparison_code (op0
, NULL
);
5756 return simplify_gen_relational (code
, mode
, cmp_mode
,
5757 XEXP (op0
, 0), XEXP (op0
, 1));
5760 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5761 cmp_mode
, XEXP (op0
, 0),
5764 /* See if any simplifications were possible. */
5767 if (CONST_INT_P (temp
))
5768 return temp
== const0_rtx
? op2
: op1
;
5770 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5776 gcc_assert (GET_MODE (op0
) == mode
);
5777 gcc_assert (GET_MODE (op1
) == mode
);
5778 gcc_assert (VECTOR_MODE_P (mode
));
5779 trueop2
= avoid_constant_pool_reference (op2
);
5780 if (CONST_INT_P (trueop2
)
5781 && GET_MODE_NUNITS (mode
).is_constant (&n_elts
))
5783 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
5784 unsigned HOST_WIDE_INT mask
;
5785 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
5788 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
5790 if (!(sel
& mask
) && !side_effects_p (op0
))
5792 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
5795 rtx trueop0
= avoid_constant_pool_reference (op0
);
5796 rtx trueop1
= avoid_constant_pool_reference (op1
);
5797 if (GET_CODE (trueop0
) == CONST_VECTOR
5798 && GET_CODE (trueop1
) == CONST_VECTOR
)
5800 rtvec v
= rtvec_alloc (n_elts
);
5803 for (i
= 0; i
< n_elts
; i
++)
5804 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
5805 ? CONST_VECTOR_ELT (trueop0
, i
)
5806 : CONST_VECTOR_ELT (trueop1
, i
));
5807 return gen_rtx_CONST_VECTOR (mode
, v
);
5810 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5811 if no element from a appears in the result. */
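	  /* Illustrative note (not from the original source): in a VEC_MERGE,
	     bit I of the selector chooses element I from the first operand
	     when set.  An element of A survives the outer merge only where
	     both N and M are set, so when (N & M) == 0 within the mode mask
	     the inner merge can be dropped and B used directly; when
	     (N & ~M) == 0 no element of B appears, so A is used instead.  */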
5812 if (GET_CODE (op0
) == VEC_MERGE
)
5814 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
5815 if (CONST_INT_P (tem
))
5817 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
5818 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
5819 return simplify_gen_ternary (code
, mode
, mode
,
5820 XEXP (op0
, 1), op1
, op2
);
5821 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
5822 return simplify_gen_ternary (code
, mode
, mode
,
5823 XEXP (op0
, 0), op1
, op2
);
5826 if (GET_CODE (op1
) == VEC_MERGE
)
5828 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
5829 if (CONST_INT_P (tem
))
5831 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
5832 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
5833 return simplify_gen_ternary (code
, mode
, mode
,
5834 op0
, XEXP (op1
, 1), op2
);
5835 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
5836 return simplify_gen_ternary (code
, mode
, mode
,
5837 op0
, XEXP (op1
, 0), op2
);
	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	     with a.  */
5843 if (GET_CODE (op0
) == VEC_DUPLICATE
5844 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
5845 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
5846 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0
, 0))), 1))
5848 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
5849 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
5851 if (XEXP (XEXP (op0
, 0), 0) == op1
5852 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
	  /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
			(const_int N))
	     with (vec_concat (X) (B)) if N == 1 or
	     (vec_concat (A) (X)) if N == 2.  */
5860 if (GET_CODE (op0
) == VEC_DUPLICATE
5861 && GET_CODE (op1
) == CONST_VECTOR
5862 && known_eq (CONST_VECTOR_NUNITS (op1
), 2)
5863 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
5864 && IN_RANGE (sel
, 1, 2))
5866 rtx newop0
= XEXP (op0
, 0);
5867 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
5869 std::swap (newop0
, newop1
);
5870 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
5872 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5873 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5874 Only applies for vectors of two elements. */
5875 if (GET_CODE (op0
) == VEC_DUPLICATE
5876 && GET_CODE (op1
) == VEC_CONCAT
5877 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
5878 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
5879 && IN_RANGE (sel
, 1, 2))
5881 rtx newop0
= XEXP (op0
, 0);
5882 rtx newop1
= XEXP (op1
, 2 - sel
);
5883 rtx otherop
= XEXP (op1
, sel
- 1);
5885 std::swap (newop0
, newop1
);
5886 /* Don't want to throw away the other part of the vec_concat if
5887 it has side-effects. */
5888 if (!side_effects_p (otherop
))
5889 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
	  /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
			(const_int N))
	     with (vec_concat x y) or (vec_concat y x) depending on the
	     value of N.  */
5896 if (GET_CODE (op0
) == VEC_DUPLICATE
5897 && GET_CODE (op1
) == VEC_DUPLICATE
5898 && known_eq (GET_MODE_NUNITS (GET_MODE (op0
)), 2)
5899 && known_eq (GET_MODE_NUNITS (GET_MODE (op1
)), 2)
5900 && IN_RANGE (sel
, 1, 2))
5902 rtx newop0
= XEXP (op0
, 0);
5903 rtx newop1
= XEXP (op1
, 0);
5905 std::swap (newop0
, newop1
);
5907 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
5911 if (rtx_equal_p (op0
, op1
)
5912 && !side_effects_p (op2
) && !side_effects_p (op1
))
5924 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5925 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5926 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5928 Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit values
5929 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5930 and then repacking them again for OUTERMODE. If OP is a CONST_VECTOR,
5931 FIRST_ELEM is the number of the first element to extract, otherwise
5932 FIRST_ELEM is ignored. */
5935 simplify_immed_subreg (fixed_size_mode outermode
, rtx op
,
5936 machine_mode innermode
, unsigned int byte
,
5937 unsigned int first_elem
, unsigned int inner_bytes
)
5941 value_mask
= (1 << value_bit
) - 1
5943 unsigned char value
[MAX_BITSIZE_MODE_ANY_MODE
/ value_bit
];
5951 rtx result_s
= NULL
;
5952 rtvec result_v
= NULL
;
5953 enum mode_class outer_class
;
5954 scalar_mode outer_submode
;
5957 /* Some ports misuse CCmode. */
5958 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
5961 /* We have no way to represent a complex constant at the rtl level. */
5962 if (COMPLEX_MODE_P (outermode
))
5965 /* We support any size mode. */
5966 max_bitsize
= MAX (GET_MODE_BITSIZE (outermode
),
5967 inner_bytes
* BITS_PER_UNIT
);
5969 /* Unpack the value. */
5971 if (GET_CODE (op
) == CONST_VECTOR
)
5973 num_elem
= CEIL (inner_bytes
, GET_MODE_UNIT_SIZE (innermode
));
5974 elem_bitsize
= GET_MODE_UNIT_BITSIZE (innermode
);
5979 elem_bitsize
= max_bitsize
;
5981 /* If this asserts, it is too complicated; reducing value_bit may help. */
5982 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
5983 /* I don't know how to handle endianness of sub-units. */
5984 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
5986 for (elem
= 0; elem
< num_elem
; elem
++)
5989 rtx el
= (GET_CODE (op
) == CONST_VECTOR
5990 ? CONST_VECTOR_ELT (op
, first_elem
+ elem
)
      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
5996 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5997 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5999 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6000 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6001 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
6002 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
6003 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
6006 switch (GET_CODE (el
))
6010 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
6012 *vp
++ = INTVAL (el
) >> i
;
6013 /* CONST_INTs are always logically sign-extended. */
6014 for (; i
< elem_bitsize
; i
+= value_bit
)
6015 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
6018 case CONST_WIDE_INT
:
6020 rtx_mode_t val
= rtx_mode_t (el
, GET_MODE_INNER (innermode
));
6021 unsigned char extend
= wi::sign_mask (val
);
6022 int prec
= wi::get_precision (val
);
6024 for (i
= 0; i
< prec
&& i
< elem_bitsize
; i
+= value_bit
)
6025 *vp
++ = wi::extract_uhwi (val
, i
, value_bit
);
6026 for (; i
< elem_bitsize
; i
+= value_bit
)
6032 if (TARGET_SUPPORTS_WIDE_INT
== 0 && GET_MODE (el
) == VOIDmode
)
6034 unsigned char extend
= 0;
6035 /* If this triggers, someone should have generated a
6036 CONST_INT instead. */
6037 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
6039 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
6040 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
6041 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
6044 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
6048 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
6050 for (; i
< elem_bitsize
; i
+= value_bit
)
6055 /* This is big enough for anything on the platform. */
6056 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
6057 scalar_float_mode el_mode
;
6059 el_mode
= as_a
<scalar_float_mode
> (GET_MODE (el
));
6060 int bitsize
= GET_MODE_BITSIZE (el_mode
);
6062 gcc_assert (bitsize
<= elem_bitsize
);
6063 gcc_assert (bitsize
% value_bit
== 0);
6065 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
6068 /* real_to_target produces its result in words affected by
6069 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6070 and use WORDS_BIG_ENDIAN instead; see the documentation
6071 of SUBREG in rtl.texi. */
6072 for (i
= 0; i
< bitsize
; i
+= value_bit
)
6075 if (WORDS_BIG_ENDIAN
)
6076 ibase
= bitsize
- 1 - i
;
6079 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
	  /* It shouldn't matter what's done here, so fill it with
	     zero.  */
6084 for (; i
< elem_bitsize
; i
+= value_bit
)
6090 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
6092 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
6093 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
6097 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
6098 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
6099 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
6101 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
6102 >> (i
- HOST_BITS_PER_WIDE_INT
);
6103 for (; i
< elem_bitsize
; i
+= value_bit
)
6113 /* Now, pick the right byte to start with. */
6114 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6115 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6116 will already have offset 0. */
6117 if (inner_bytes
>= GET_MODE_SIZE (outermode
))
6119 unsigned ibyte
= inner_bytes
- GET_MODE_SIZE (outermode
) - byte
;
6120 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6121 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6122 byte
= (subword_byte
% UNITS_PER_WORD
6123 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
6126 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6127 so if it's become negative it will instead be very large.) */
6128 gcc_assert (byte
< inner_bytes
);
6130 /* Convert from bytes to chunks of size value_bit. */
6131 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
6133 /* Re-pack the value. */
6134 num_elem
= GET_MODE_NUNITS (outermode
);
6136 if (VECTOR_MODE_P (outermode
))
6138 result_v
= rtvec_alloc (num_elem
);
6139 elems
= &RTVEC_ELT (result_v
, 0);
6144 outer_submode
= GET_MODE_INNER (outermode
);
6145 outer_class
= GET_MODE_CLASS (outer_submode
);
6146 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
6148 gcc_assert (elem_bitsize
% value_bit
== 0);
6149 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
6151 for (elem
= 0; elem
< num_elem
; elem
++)
      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
6158 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
6159 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
6161 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6162 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6163 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
6164 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
6165 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
6168 switch (outer_class
)
6171 case MODE_PARTIAL_INT
:
6176 = (GET_MODE_BITSIZE (outer_submode
) + HOST_BITS_PER_WIDE_INT
- 1)
6177 / HOST_BITS_PER_WIDE_INT
;
6178 HOST_WIDE_INT tmp
[MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_WIDE_INT
];
6181 if (GET_MODE_PRECISION (outer_submode
) > MAX_BITSIZE_MODE_ANY_INT
)
6183 for (u
= 0; u
< units
; u
++)
6185 unsigned HOST_WIDE_INT buf
= 0;
6187 i
< HOST_BITS_PER_WIDE_INT
&& base
+ i
< elem_bitsize
;
6189 buf
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
6192 base
+= HOST_BITS_PER_WIDE_INT
;
6194 r
= wide_int::from_array (tmp
, units
,
6195 GET_MODE_PRECISION (outer_submode
));
6196 #if TARGET_SUPPORTS_WIDE_INT == 0
6197 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6198 if (wi::min_precision (r
, SIGNED
) > HOST_BITS_PER_DOUBLE_INT
)
6201 elems
[elem
] = immed_wide_int_const (r
, outer_submode
);
6206 case MODE_DECIMAL_FLOAT
:
6209 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32] = { 0 };
6211 /* real_from_target wants its input in words affected by
6212 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6213 and use WORDS_BIG_ENDIAN instead; see the documentation
6214 of SUBREG in rtl.texi. */
6215 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
6218 if (WORDS_BIG_ENDIAN
)
6219 ibase
= elem_bitsize
- 1 - i
;
6222 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
6225 real_from_target (&r
, tmp
, outer_submode
);
6226 elems
[elem
] = const_double_from_real_value (r
, outer_submode
);
6238 f
.mode
= outer_submode
;
6241 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
6243 f
.data
.low
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
6244 for (; i
< elem_bitsize
; i
+= value_bit
)
6245 f
.data
.high
|= ((unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
6246 << (i
- HOST_BITS_PER_WIDE_INT
));
6248 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
6256 if (VECTOR_MODE_P (outermode
))
6257 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, poly_uint64 byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  poly_uint64 outersize = GET_MODE_SIZE (outermode);
  if (!multiple_p (byte, outersize))
    return NULL_RTX;

  poly_uint64 innersize = GET_MODE_SIZE (innermode);
  if (maybe_ge (byte, innersize))
    return NULL_RTX;

  if (outermode == innermode && known_eq (byte, 0U))
    return op;

  if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
    {
      rtx elt;

      if (VECTOR_MODE_P (outermode)
	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return gen_vec_duplicate (outermode, elt);

      if (outermode == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return elt;
    }

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || CONST_FIXED_P (op)
      || GET_CODE (op) == CONST_VECTOR)
    {
      /* simplify_immed_subreg deconstructs OP into bytes and constructs
	 the result from bytes, so it only works if the sizes of the modes
	 and the value of the offset are known at compile time.  Cases
	 that apply to general modes and offsets should be handled here
	 before calling simplify_immed_subreg.  */
      fixed_size_mode fs_outermode, fs_innermode;
      unsigned HOST_WIDE_INT cbyte;
      if (is_a <fixed_size_mode> (outermode, &fs_outermode)
	  && is_a <fixed_size_mode> (innermode, &fs_innermode)
	  && byte.is_constant (&cbyte))
	return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte,
				      0, GET_MODE_SIZE (fs_innermode));

      /* Handle constant-sized outer modes and variable-sized inner modes.  */
      unsigned HOST_WIDE_INT first_elem;
      if (GET_CODE (op) == CONST_VECTOR
	  && is_a <fixed_size_mode> (outermode, &fs_outermode)
	  && constant_multiple_p (byte, GET_MODE_UNIT_SIZE (innermode),
				  &first_elem))
	return simplify_immed_subreg (fs_outermode, op, innermode, 0,
				      first_elem,
				      GET_MODE_SIZE (fs_outermode));

      return NULL_RTX;
    }

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
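  /* For example (purely illustrative, assuming the offsets are valid on
     the target): (subreg:QI (subreg:HI (reg:SI R) 0) 0) can be rewritten
     as (subreg:QI (reg:SI R) 0), and (subreg:SI (subreg:HI (reg:SI R) 0) 0)
     collapses back to (reg:SI R).  */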
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
      rtx newx;

      if (outermode == innermostmode
	  && known_eq (byte, 0U)
	  && known_eq (SUBREG_BYTE (op), 0))
	return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
	 to the inner value of OP.  */
      poly_int64 mem_offset = subreg_memory_offset (outermode,
						    innermode, byte);
      poly_int64 op_mem_offset = subreg_memory_offset (op);
      poly_int64 final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
	{
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (maybe_lt (final_offset, 0)
	      || maybe_ge (poly_uint64 (final_offset), innermostsize)
	      || !multiple_p (final_offset, outersize))
	    return NULL_RTX;
	}
      else
	{
	  poly_int64 required_offset = subreg_memory_offset (outermode,
							     innermostmode, 0);
	  if (maybe_ne (final_offset, required_offset))
	    return NULL_RTX;
	  /* Paradoxical subregs always have byte offset 0.  */
	  final_offset = 0;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && known_ge (outersize, innersize)
	      && known_le (outersize, innermostsize)
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
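  /* E.g. a (subreg:SI (reg:DI D) N) of a hard register can usually be
     rewritten as a plain SImode hard register, provided
     simplify_subreg_regno can map the (register, byte) pair and the target
     allows that register in SImode (illustrative; the resulting register
     number is entirely target-specific).  */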
  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
				      subreg_memory_offset (outermode,
							    innermode, byte));

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */

	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && known_le (outersize, innersize))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      poly_uint64 final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      poly_uint64 part_size = GET_MODE_SIZE (part_mode);
      if (known_lt (byte, part_size))
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else if (known_ge (byte, part_size))
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}
      else
	return NULL_RTX;

      if (maybe_gt (final_offset + outersize, part_size))
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
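  /* For instance (illustrative only, little-endian byte numbering):
     in (subreg:SI (zero_extend:DI (reg:SI X)) 4) the subreg selects
     bits 32..63 of the DImode value, which the zero extension guarantees
     are all zero, so the whole expression folds to (const_int 0).  */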
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
	return CONST0_RTX (outermode);
    }

  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
    {
      /* Handle polynomial integers.  The upper bits of a paradoxical
	 subreg are undefined, so this is safe regardless of whether
	 we're truncating or extending.  */
      if (CONST_POLY_INT_P (op))
	{
	  poly_wide_int val
	    = poly_wide_int::from (const_poly_int_value (op),
				   GET_MODE_PRECISION (int_outermode),
				   SIGNED);
	  return immed_wide_int_const (val, int_outermode);
	}

      if (GET_MODE_PRECISION (int_outermode)
	  < GET_MODE_PRECISION (int_innermode))
	{
	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
	  if (tem)
	    return tem;
	}
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, poly_uint64 byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
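/* A typical (purely illustrative) use from a pass that wants the low
   SImode part of a DImode value SRC, assuming DEST is an SImode operand:

     rtx low = lowpart_subreg (SImode, src, DImode);
     if (low)
       emit_move_insn (dest, low);

   i.e. callers must be prepared for a null result when neither a
   simplification nor a valid SUBREG can be generated.  */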
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}

#if CHECKING_P
namespace selftest {

/* Make a unique pseudo REG of mode MODE for use by selftests.  */

static rtx
make_test_reg (machine_mode mode)
{
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}

/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  poly_uint64 nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NOT, mode,
					       duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NEG, mode,
					       duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (PLUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (MINUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
			 simplify_binary_operation (MINUS, mode, duplicate,
						    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
		     simplify_binary_operation (VEC_SELECT, inner_mode,
						duplicate, zero_par));

  /* And again with the final element.  */
  unsigned HOST_WIDE_INT const_nunits;
  if (nunits.is_constant (&const_nunits))
    {
      rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
      rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
      ASSERT_RTX_PTR_EQ (scalar_reg,
			 simplify_binary_operation (VEC_SELECT, inner_mode,
						    duplicate, last_par));
    }

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
		 simplify_gen_subreg (inner_mode, duplicate,
				      mode, offset));

  machine_mode narrower_mode;
  if (maybe_ne (nunits, 2U)
      && multiple_p (nunits, 2)
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_binary_operation (VEC_SELECT, narrower_mode,
						duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_gen_subreg (narrower_mode, duplicate,
					  mode, offset));
    }
}
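/* For reference: (vec_series:M BASE STEP) represents the vector
   { BASE, BASE + STEP, BASE + 2*STEP, ... }, so for example negating a
   series with base 0 and step -X yields the series with base 0 and step X,
   which is what the first assertion in test_vector_ops_series below
   checks.  */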
/* Test vector simplifications involving VEC_SERIES in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */
static void
test_vector_ops_series (machine_mode mode, rtx scalar_reg)
{
  /* Test unary cases with VEC_SERIES arguments.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
  rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
  rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
  rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
  rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
  rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
  rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
					 neg_scalar_reg);
  ASSERT_RTX_EQ (series_0_r,
		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
  ASSERT_RTX_EQ (series_r_r,
		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));

  /* Test that a VEC_SERIES with a zero step is simplified away.  */
  ASSERT_RTX_EQ (duplicate,
		 simplify_binary_operation (VEC_SERIES, mode,
					    scalar_reg, const0_rtx));

  /* Test PLUS and MINUS with VEC_SERIES.  */
  rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
  rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
  rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
  ASSERT_RTX_EQ (series_r_r,
		 simplify_binary_operation (PLUS, mode, series_0_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_r,
		 simplify_binary_operation (MINUS, mode, series_r_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
					    constm1_rtx));
}

/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
	{
	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
	  test_vector_ops_duplicate (mode, scalar_reg);
	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
	    test_vector_ops_series (mode, scalar_reg);
	}
    }
}

template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};

/* Test various CONST_POLY_INT properties.  */

template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller arithmetic
     testing is done directly on poly_ints.  */
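  /* A worked example of the encoding used below: poly_int64 (5, 4) denotes
     the value 5 + 4*x for the runtime parameter x, so multiplying it by 6
     gives 30 + 24*x, i.e. poly_int64 (30, 24), and shifting it left by 2
     gives 20 + 16*x, i.e. poly_int64 (20, 16).  That is what the MULT and
     ASHIFT assertions check.  */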
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}

/* Run all of the selftests within this file.  */

void
simplify_rtx_c_tests ()
{
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}

} // namespace selftest

#endif /* CHECKING_P */