1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
33 #include "diagnostic-core.h"
37 #include "selftest-rtl.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
48 static rtx
neg_const_int (machine_mode
, const_rtx
);
49 static bool plus_minus_operand_p (const_rtx
);
50 static rtx
simplify_plus_minus (enum rtx_code
, machine_mode
, rtx
, rtx
);
51 static rtx
simplify_associative_operation (enum rtx_code
, machine_mode
,
53 static rtx
simplify_relational_operation_1 (enum rtx_code
, machine_mode
,
54 machine_mode
, rtx
, rtx
);
55 static rtx
simplify_unary_operation_1 (enum rtx_code
, machine_mode
, rtx
);
56 static rtx
simplify_binary_operation_1 (enum rtx_code
, machine_mode
,
59 /* Negate a CONST_INT rtx. */
61 neg_const_int (machine_mode mode
, const_rtx i
)
63 unsigned HOST_WIDE_INT val
= -UINTVAL (i
);
65 if (!HWI_COMPUTABLE_MODE_P (mode
)
66 && val
== UINTVAL (i
))
67 return simplify_const_unary_operation (NEG
, mode
, CONST_CAST_RTX (i
),
69 return gen_int_mode (val
, mode
);
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
76 mode_signbit_p (machine_mode mode
, const_rtx x
)
78 unsigned HOST_WIDE_INT val
;
80 scalar_int_mode int_mode
;
82 if (!is_int_mode (mode
, &int_mode
))
85 width
= GET_MODE_PRECISION (int_mode
);
89 if (width
<= HOST_BITS_PER_WIDE_INT
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x
))
96 unsigned int elts
= CONST_WIDE_INT_NUNITS (x
);
97 if (elts
!= (width
+ HOST_BITS_PER_WIDE_INT
- 1) / HOST_BITS_PER_WIDE_INT
)
99 for (i
= 0; i
< elts
- 1; i
++)
100 if (CONST_WIDE_INT_ELT (x
, i
) != 0)
102 val
= CONST_WIDE_INT_ELT (x
, elts
- 1);
103 width
%= HOST_BITS_PER_WIDE_INT
;
105 width
= HOST_BITS_PER_WIDE_INT
;
108 else if (width
<= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x
)
110 && CONST_DOUBLE_LOW (x
) == 0)
112 val
= CONST_DOUBLE_HIGH (x
);
113 width
-= HOST_BITS_PER_WIDE_INT
;
117 /* X is not an integer constant. */
120 if (width
< HOST_BITS_PER_WIDE_INT
)
121 val
&= (HOST_WIDE_INT_1U
<< width
) - 1;
122 return val
== (HOST_WIDE_INT_1U
<< (width
- 1));
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
130 val_signbit_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
133 scalar_int_mode int_mode
;
135 if (!is_int_mode (mode
, &int_mode
))
138 width
= GET_MODE_PRECISION (int_mode
);
139 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
142 val
&= GET_MODE_MASK (int_mode
);
143 return val
== (HOST_WIDE_INT_1U
<< (width
- 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
149 val_signbit_known_set_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
153 scalar_int_mode int_mode
;
154 if (!is_int_mode (mode
, &int_mode
))
157 width
= GET_MODE_PRECISION (int_mode
);
158 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
161 val
&= HOST_WIDE_INT_1U
<< (width
- 1);
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
168 val_signbit_known_clear_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
172 scalar_int_mode int_mode
;
173 if (!is_int_mode (mode
, &int_mode
))
176 width
= GET_MODE_PRECISION (int_mode
);
177 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
180 val
&= HOST_WIDE_INT_1U
<< (width
- 1);
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
188 simplify_gen_binary (enum rtx_code code
, machine_mode mode
, rtx op0
,
193 /* If this simplifies, do it. */
194 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0
, op1
))
201 std::swap (op0
, op1
);
203 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
209 avoid_constant_pool_reference (rtx x
)
213 HOST_WIDE_INT offset
= 0;
215 switch (GET_CODE (x
))
221 /* Handle float extensions of constant pool references. */
223 c
= avoid_constant_pool_reference (tmp
);
224 if (c
!= tmp
&& CONST_DOUBLE_AS_FLOAT_P (c
))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c
),
233 if (GET_MODE (x
) == BLKmode
)
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr
= targetm
.delegitimize_address (addr
);
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr
) == CONST
243 && GET_CODE (XEXP (addr
, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr
, 0), 1)))
246 offset
= INTVAL (XEXP (XEXP (addr
, 0), 1));
247 addr
= XEXP (XEXP (addr
, 0), 0);
250 if (GET_CODE (addr
) == LO_SUM
)
251 addr
= XEXP (addr
, 1);
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr
) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr
))
258 c
= get_pool_constant (addr
);
259 cmode
= get_pool_mode (addr
);
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset
== 0 && cmode
== GET_MODE (x
))
266 else if (offset
>= 0 && offset
< GET_MODE_SIZE (cmode
))
268 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
269 if (tem
&& CONSTANT_P (tem
))
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x
)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
288 && MEM_OFFSET_KNOWN_P (x
))
290 tree decl
= MEM_EXPR (x
);
291 machine_mode mode
= GET_MODE (x
);
292 poly_int64 offset
= 0;
294 switch (TREE_CODE (decl
))
304 case ARRAY_RANGE_REF
:
309 case VIEW_CONVERT_EXPR
:
311 poly_int64 bitsize
, bitpos
, bytepos
, toffset_val
= 0;
313 int unsignedp
, reversep
, volatilep
= 0;
316 = get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
, &mode
,
317 &unsignedp
, &reversep
, &volatilep
);
318 if (maybe_ne (bitsize
, GET_MODE_BITSIZE (mode
))
319 || !multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
)
320 || (toffset
&& !poly_int_tree_p (toffset
, &toffset_val
)))
323 offset
+= bytepos
+ toffset_val
;
329 && mode
== GET_MODE (x
)
331 && (TREE_STATIC (decl
)
332 || DECL_THREAD_LOCAL_P (decl
))
333 && DECL_RTL_SET_P (decl
)
334 && MEM_P (DECL_RTL (decl
)))
338 offset
+= MEM_OFFSET (x
);
340 newx
= DECL_RTL (decl
);
344 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
345 poly_int64 n_offset
, o_offset
;
347 /* Avoid creating a new MEM needlessly if we already had
348 the same address. We do if there's no OFFSET and the
349 old address X is identical to NEWX, or if X is of the
350 form (plus NEWX OFFSET), or the NEWX is of the form
351 (plus Y (const_int Z)) and X is that with the offset
352 added: (plus Y (const_int Z+OFFSET)). */
353 n
= strip_offset (n
, &n_offset
);
354 o
= strip_offset (o
, &o_offset
);
355 if (!(known_eq (o_offset
, n_offset
+ offset
)
356 && rtx_equal_p (o
, n
)))
357 x
= adjust_address_nv (newx
, mode
, offset
);
359 else if (GET_MODE (x
) == GET_MODE (newx
)
360 && known_eq (offset
, 0))
368 /* Make a unary operation by first seeing if it folds and otherwise making
369 the specified operation. */
372 simplify_gen_unary (enum rtx_code code
, machine_mode mode
, rtx op
,
373 machine_mode op_mode
)
377 /* If this simplifies, use it. */
378 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
381 return gen_rtx_fmt_e (code
, mode
, op
);
384 /* Likewise for ternary operations. */
387 simplify_gen_ternary (enum rtx_code code
, machine_mode mode
,
388 machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
392 /* If this simplifies, use it. */
393 if ((tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
394 op0
, op1
, op2
)) != 0)
397 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
400 /* Likewise, for relational operations.
401 CMP_MODE specifies mode comparison is done in. */
404 simplify_gen_relational (enum rtx_code code
, machine_mode mode
,
405 machine_mode cmp_mode
, rtx op0
, rtx op1
)
409 if ((tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
413 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
416 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
417 and simplify the result. If FN is non-NULL, call this callback on each
418 X, if it returns non-NULL, replace X with its return value and simplify the
422 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
423 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
425 enum rtx_code code
= GET_CODE (x
);
426 machine_mode mode
= GET_MODE (x
);
427 machine_mode op_mode
;
429 rtx op0
, op1
, op2
, newx
, op
;
433 if (__builtin_expect (fn
!= NULL
, 0))
435 newx
= fn (x
, old_rtx
, data
);
439 else if (rtx_equal_p (x
, old_rtx
))
440 return copy_rtx ((rtx
) data
);
442 switch (GET_RTX_CLASS (code
))
446 op_mode
= GET_MODE (op0
);
447 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
448 if (op0
== XEXP (x
, 0))
450 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
454 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
455 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
456 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
458 return simplify_gen_binary (code
, mode
, op0
, op1
);
461 case RTX_COMM_COMPARE
:
464 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
465 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
466 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
467 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
469 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
472 case RTX_BITFIELD_OPS
:
474 op_mode
= GET_MODE (op0
);
475 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
476 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
477 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
478 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
480 if (op_mode
== VOIDmode
)
481 op_mode
= GET_MODE (op0
);
482 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
487 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
488 if (op0
== SUBREG_REG (x
))
490 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
491 GET_MODE (SUBREG_REG (x
)),
493 return op0
? op0
: x
;
500 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
501 if (op0
== XEXP (x
, 0))
503 return replace_equiv_address_nv (x
, op0
);
505 else if (code
== LO_SUM
)
507 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
508 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
510 /* (lo_sum (high x) y) -> y where x and y have the same base. */
511 if (GET_CODE (op0
) == HIGH
)
513 rtx base0
, base1
, offset0
, offset1
;
514 split_const (XEXP (op0
, 0), &base0
, &offset0
);
515 split_const (op1
, &base1
, &offset1
);
516 if (rtx_equal_p (base0
, base1
))
520 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
522 return gen_rtx_LO_SUM (mode
, op0
, op1
);
531 fmt
= GET_RTX_FORMAT (code
);
532 for (i
= 0; fmt
[i
]; i
++)
537 newvec
= XVEC (newx
, i
);
538 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
540 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
542 if (op
!= RTVEC_ELT (vec
, j
))
546 newvec
= shallow_copy_rtvec (vec
);
548 newx
= shallow_copy_rtx (x
);
549 XVEC (newx
, i
) = newvec
;
551 RTVEC_ELT (newvec
, j
) = op
;
559 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
560 if (op
!= XEXP (x
, i
))
563 newx
= shallow_copy_rtx (x
);
572 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
573 resulting RTX. Return a new RTX which is as simplified as possible. */
576 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
578 return simplify_replace_fn_rtx (x
, old_rtx
, 0, new_rtx
);
581 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
582 Only handle cases where the truncated value is inherently an rvalue.
584 RTL provides two ways of truncating a value:
586 1. a lowpart subreg. This form is only a truncation when both
587 the outer and inner modes (here MODE and OP_MODE respectively)
588 are scalar integers, and only then when the subreg is used as
591 It is only valid to form such truncating subregs if the
592 truncation requires no action by the target. The onus for
593 proving this is on the creator of the subreg -- e.g. the
594 caller to simplify_subreg or simplify_gen_subreg -- and typically
595 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
597 2. a TRUNCATE. This form handles both scalar and compound integers.
599 The first form is preferred where valid. However, the TRUNCATE
600 handling in simplify_unary_operation turns the second form into the
601 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
602 so it is generally safe to form rvalue truncations using:
604 simplify_gen_unary (TRUNCATE, ...)
606 and leave simplify_unary_operation to work out which representation
609 Because of the proof requirements on (1), simplify_truncation must
610 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
611 regardless of whether the outer truncation came from a SUBREG or a
612 TRUNCATE. For example, if the caller has proven that an SImode
617 is a no-op and can be represented as a subreg, it does not follow
618 that SImode truncations of X and Y are also no-ops. On a target
619 like 64-bit MIPS that requires SImode values to be stored in
620 sign-extended form, an SImode truncation of:
622 (and:DI (reg:DI X) (const_int 63))
624 is trivially a no-op because only the lower 6 bits can be set.
625 However, X is still an arbitrary 64-bit number and so we cannot
626 assume that truncating it too is a no-op. */
629 simplify_truncation (machine_mode mode
, rtx op
,
630 machine_mode op_mode
)
632 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
633 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
634 scalar_int_mode int_mode
, int_op_mode
, subreg_mode
;
636 gcc_assert (precision
<= op_precision
);
638 /* Optimize truncations of zero and sign extended values. */
639 if (GET_CODE (op
) == ZERO_EXTEND
640 || GET_CODE (op
) == SIGN_EXTEND
)
642 /* There are three possibilities. If MODE is the same as the
643 origmode, we can omit both the extension and the subreg.
644 If MODE is not larger than the origmode, we can apply the
645 truncation without the extension. Finally, if the outermode
646 is larger than the origmode, we can just extend to the appropriate
648 machine_mode origmode
= GET_MODE (XEXP (op
, 0));
649 if (mode
== origmode
)
651 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
652 return simplify_gen_unary (TRUNCATE
, mode
,
653 XEXP (op
, 0), origmode
);
655 return simplify_gen_unary (GET_CODE (op
), mode
,
656 XEXP (op
, 0), origmode
);
659 /* If the machine can perform operations in the truncated mode, distribute
660 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
661 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
663 && (!WORD_REGISTER_OPERATIONS
|| precision
>= BITS_PER_WORD
)
664 && (GET_CODE (op
) == PLUS
665 || GET_CODE (op
) == MINUS
666 || GET_CODE (op
) == MULT
))
668 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
671 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
673 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
677 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
678 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
679 the outer subreg is effectively a truncation to the original mode. */
680 if ((GET_CODE (op
) == LSHIFTRT
681 || GET_CODE (op
) == ASHIFTRT
)
682 /* Ensure that OP_MODE is at least twice as wide as MODE
683 to avoid the possibility that an outer LSHIFTRT shifts by more
684 than the sign extension's sign_bit_copies and introduces zeros
685 into the high bits of the result. */
686 && 2 * precision
<= op_precision
687 && CONST_INT_P (XEXP (op
, 1))
688 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
689 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
690 && UINTVAL (XEXP (op
, 1)) < precision
)
691 return simplify_gen_binary (ASHIFTRT
, mode
,
692 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
694 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
695 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
696 the outer subreg is effectively a truncation to the original mode. */
697 if ((GET_CODE (op
) == LSHIFTRT
698 || GET_CODE (op
) == ASHIFTRT
)
699 && CONST_INT_P (XEXP (op
, 1))
700 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
701 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
702 && UINTVAL (XEXP (op
, 1)) < precision
)
703 return simplify_gen_binary (LSHIFTRT
, mode
,
704 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
706 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
707 to (ashift:QI (x:QI) C), where C is a suitable small constant and
708 the outer subreg is effectively a truncation to the original mode. */
709 if (GET_CODE (op
) == ASHIFT
710 && CONST_INT_P (XEXP (op
, 1))
711 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
712 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
713 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
714 && UINTVAL (XEXP (op
, 1)) < precision
)
715 return simplify_gen_binary (ASHIFT
, mode
,
716 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
718 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
719 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
721 if (GET_CODE (op
) == AND
722 && (GET_CODE (XEXP (op
, 0)) == LSHIFTRT
723 || GET_CODE (XEXP (op
, 0)) == ASHIFTRT
)
724 && CONST_INT_P (XEXP (XEXP (op
, 0), 1))
725 && CONST_INT_P (XEXP (op
, 1)))
727 rtx op0
= (XEXP (XEXP (op
, 0), 0));
728 rtx shift_op
= XEXP (XEXP (op
, 0), 1);
729 rtx mask_op
= XEXP (op
, 1);
730 unsigned HOST_WIDE_INT shift
= UINTVAL (shift_op
);
731 unsigned HOST_WIDE_INT mask
= UINTVAL (mask_op
);
733 if (shift
< precision
734 /* If doing this transform works for an X with all bits set,
735 it works for any X. */
736 && ((GET_MODE_MASK (mode
) >> shift
) & mask
)
737 == ((GET_MODE_MASK (op_mode
) >> shift
) & mask
)
738 && (op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, op_mode
))
739 && (op0
= simplify_gen_binary (LSHIFTRT
, mode
, op0
, shift_op
)))
741 mask_op
= GEN_INT (trunc_int_for_mode (mask
, mode
));
742 return simplify_gen_binary (AND
, mode
, op0
, mask_op
);
746 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
747 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
749 if ((GET_CODE (op
) == ZERO_EXTRACT
|| GET_CODE (op
) == SIGN_EXTRACT
)
750 && REG_P (XEXP (op
, 0))
751 && GET_MODE (XEXP (op
, 0)) == GET_MODE (op
)
752 && CONST_INT_P (XEXP (op
, 1))
753 && CONST_INT_P (XEXP (op
, 2)))
755 rtx op0
= XEXP (op
, 0);
756 unsigned HOST_WIDE_INT len
= UINTVAL (XEXP (op
, 1));
757 unsigned HOST_WIDE_INT pos
= UINTVAL (XEXP (op
, 2));
758 if (BITS_BIG_ENDIAN
&& pos
>= op_precision
- precision
)
760 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
763 pos
-= op_precision
- precision
;
764 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
765 XEXP (op
, 1), GEN_INT (pos
));
768 else if (!BITS_BIG_ENDIAN
&& precision
>= len
+ pos
)
770 op0
= simplify_gen_unary (TRUNCATE
, mode
, op0
, GET_MODE (op0
));
772 return simplify_gen_ternary (GET_CODE (op
), mode
, mode
, op0
,
773 XEXP (op
, 1), XEXP (op
, 2));
777 /* Recognize a word extraction from a multi-word subreg. */
778 if ((GET_CODE (op
) == LSHIFTRT
779 || GET_CODE (op
) == ASHIFTRT
)
780 && SCALAR_INT_MODE_P (mode
)
781 && SCALAR_INT_MODE_P (op_mode
)
782 && precision
>= BITS_PER_WORD
783 && 2 * precision
<= op_precision
784 && CONST_INT_P (XEXP (op
, 1))
785 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
786 && UINTVAL (XEXP (op
, 1)) < op_precision
)
788 poly_int64 byte
= subreg_lowpart_offset (mode
, op_mode
);
789 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
790 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
792 ? byte
- shifted_bytes
793 : byte
+ shifted_bytes
));
796 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
797 and try replacing the TRUNCATE and shift with it. Don't do this
798 if the MEM has a mode-dependent address. */
799 if ((GET_CODE (op
) == LSHIFTRT
800 || GET_CODE (op
) == ASHIFTRT
)
801 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
802 && is_a
<scalar_int_mode
> (op_mode
, &int_op_mode
)
803 && MEM_P (XEXP (op
, 0))
804 && CONST_INT_P (XEXP (op
, 1))
805 && INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (int_mode
) == 0
806 && INTVAL (XEXP (op
, 1)) > 0
807 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (int_op_mode
)
808 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
809 MEM_ADDR_SPACE (XEXP (op
, 0)))
810 && ! MEM_VOLATILE_P (XEXP (op
, 0))
811 && (GET_MODE_SIZE (int_mode
) >= UNITS_PER_WORD
812 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
814 poly_int64 byte
= subreg_lowpart_offset (int_mode
, int_op_mode
);
815 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
816 return adjust_address_nv (XEXP (op
, 0), int_mode
,
818 ? byte
- shifted_bytes
819 : byte
+ shifted_bytes
));
822 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
823 (OP:SI foo:SI) if OP is NEG or ABS. */
824 if ((GET_CODE (op
) == ABS
825 || GET_CODE (op
) == NEG
)
826 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
827 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
828 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
829 return simplify_gen_unary (GET_CODE (op
), mode
,
830 XEXP (XEXP (op
, 0), 0), mode
);
832 /* (truncate:A (subreg:B (truncate:C X) 0)) is
834 if (GET_CODE (op
) == SUBREG
835 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
836 && SCALAR_INT_MODE_P (op_mode
)
837 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &subreg_mode
)
838 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
839 && subreg_lowpart_p (op
))
841 rtx inner
= XEXP (SUBREG_REG (op
), 0);
842 if (GET_MODE_PRECISION (int_mode
) <= GET_MODE_PRECISION (subreg_mode
))
843 return simplify_gen_unary (TRUNCATE
, int_mode
, inner
,
846 /* If subreg above is paradoxical and C is narrower
847 than A, return (subreg:A (truncate:C X) 0). */
848 return simplify_gen_subreg (int_mode
, SUBREG_REG (op
), subreg_mode
, 0);
851 /* (truncate:A (truncate:B X)) is (truncate:A X). */
852 if (GET_CODE (op
) == TRUNCATE
)
853 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
854 GET_MODE (XEXP (op
, 0)));
856 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
858 if (GET_CODE (op
) == IOR
859 && SCALAR_INT_MODE_P (mode
)
860 && SCALAR_INT_MODE_P (op_mode
)
861 && CONST_INT_P (XEXP (op
, 1))
862 && trunc_int_for_mode (INTVAL (XEXP (op
, 1)), mode
) == -1)
868 /* Try to simplify a unary operation CODE whose output mode is to be
869 MODE with input operand OP whose mode was originally OP_MODE.
870 Return zero if no simplification can be made. */
872 simplify_unary_operation (enum rtx_code code
, machine_mode mode
,
873 rtx op
, machine_mode op_mode
)
877 trueop
= avoid_constant_pool_reference (op
);
879 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
883 return simplify_unary_operation_1 (code
, mode
, op
);
886 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
890 exact_int_to_float_conversion_p (const_rtx op
)
892 int out_bits
= significand_size (GET_MODE_INNER (GET_MODE (op
)));
893 machine_mode op0_mode
= GET_MODE (XEXP (op
, 0));
894 /* Constants shouldn't reach here. */
895 gcc_assert (op0_mode
!= VOIDmode
);
896 int in_prec
= GET_MODE_UNIT_PRECISION (op0_mode
);
897 int in_bits
= in_prec
;
898 if (HWI_COMPUTABLE_MODE_P (op0_mode
))
900 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (XEXP (op
, 0), op0_mode
);
901 if (GET_CODE (op
) == FLOAT
)
902 in_bits
-= num_sign_bit_copies (XEXP (op
, 0), op0_mode
);
903 else if (GET_CODE (op
) == UNSIGNED_FLOAT
)
904 in_bits
= wi::min_precision (wi::uhwi (nonzero
, in_prec
), UNSIGNED
);
907 in_bits
-= wi::ctz (wi::uhwi (nonzero
, in_prec
));
909 return in_bits
<= out_bits
;
912 /* Perform some simplifications we can do even if the operands
915 simplify_unary_operation_1 (enum rtx_code code
, machine_mode mode
, rtx op
)
917 enum rtx_code reversed
;
918 rtx temp
, elt
, base
, step
;
919 scalar_int_mode inner
, int_mode
, op_mode
, op0_mode
;
924 /* (not (not X)) == X. */
925 if (GET_CODE (op
) == NOT
)
928 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
929 comparison is all ones. */
930 if (COMPARISON_P (op
)
931 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
932 && ((reversed
= reversed_comparison_code (op
, NULL
)) != UNKNOWN
))
933 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
934 XEXP (op
, 0), XEXP (op
, 1));
936 /* (not (plus X -1)) can become (neg X). */
937 if (GET_CODE (op
) == PLUS
938 && XEXP (op
, 1) == constm1_rtx
)
939 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
941 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
942 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
943 and MODE_VECTOR_INT. */
944 if (GET_CODE (op
) == NEG
&& CONSTM1_RTX (mode
))
945 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
948 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
949 if (GET_CODE (op
) == XOR
950 && CONST_INT_P (XEXP (op
, 1))
951 && (temp
= simplify_unary_operation (NOT
, mode
,
952 XEXP (op
, 1), mode
)) != 0)
953 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
955 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
956 if (GET_CODE (op
) == PLUS
957 && CONST_INT_P (XEXP (op
, 1))
958 && mode_signbit_p (mode
, XEXP (op
, 1))
959 && (temp
= simplify_unary_operation (NOT
, mode
,
960 XEXP (op
, 1), mode
)) != 0)
961 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
964 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
965 operands other than 1, but that is not valid. We could do a
966 similar simplification for (not (lshiftrt C X)) where C is
967 just the sign bit, but this doesn't seem common enough to
969 if (GET_CODE (op
) == ASHIFT
970 && XEXP (op
, 0) == const1_rtx
)
972 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
973 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
976 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
977 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
978 so we can perform the above simplification. */
979 if (STORE_FLAG_VALUE
== -1
980 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
981 && GET_CODE (op
) == ASHIFTRT
982 && CONST_INT_P (XEXP (op
, 1))
983 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
984 return simplify_gen_relational (GE
, int_mode
, VOIDmode
,
985 XEXP (op
, 0), const0_rtx
);
988 if (partial_subreg_p (op
)
989 && subreg_lowpart_p (op
)
990 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
991 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
993 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
996 x
= gen_rtx_ROTATE (inner_mode
,
997 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
999 XEXP (SUBREG_REG (op
), 1));
1000 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
1005 /* Apply De Morgan's laws to reduce number of patterns for machines
1006 with negating logical insns (and-not, nand, etc.). If result has
1007 only one NOT, put it first, since that is how the patterns are
1009 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
1011 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
1012 machine_mode op_mode
;
1014 op_mode
= GET_MODE (in1
);
1015 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
1017 op_mode
= GET_MODE (in2
);
1018 if (op_mode
== VOIDmode
)
1020 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
1022 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
1023 std::swap (in1
, in2
);
1025 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
1029 /* (not (bswap x)) -> (bswap (not x)). */
1030 if (GET_CODE (op
) == BSWAP
)
1032 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1033 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
1038 /* (neg (neg X)) == X. */
1039 if (GET_CODE (op
) == NEG
)
1040 return XEXP (op
, 0);
1042 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1043 If comparison is not reversible use
1045 if (GET_CODE (op
) == IF_THEN_ELSE
)
1047 rtx cond
= XEXP (op
, 0);
1048 rtx true_rtx
= XEXP (op
, 1);
1049 rtx false_rtx
= XEXP (op
, 2);
1051 if ((GET_CODE (true_rtx
) == NEG
1052 && rtx_equal_p (XEXP (true_rtx
, 0), false_rtx
))
1053 || (GET_CODE (false_rtx
) == NEG
1054 && rtx_equal_p (XEXP (false_rtx
, 0), true_rtx
)))
1056 if (reversed_comparison_code (cond
, NULL
) != UNKNOWN
)
1057 temp
= reversed_comparison (cond
, mode
);
1061 std::swap (true_rtx
, false_rtx
);
1063 return simplify_gen_ternary (IF_THEN_ELSE
, mode
,
1064 mode
, temp
, true_rtx
, false_rtx
);
1068 /* (neg (plus X 1)) can become (not X). */
1069 if (GET_CODE (op
) == PLUS
1070 && XEXP (op
, 1) == const1_rtx
)
1071 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1073 /* Similarly, (neg (not X)) is (plus X 1). */
1074 if (GET_CODE (op
) == NOT
)
1075 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
1078 /* (neg (minus X Y)) can become (minus Y X). This transformation
1079 isn't safe for modes with signed zeros, since if X and Y are
1080 both +0, (minus Y X) is the same as (minus X Y). If the
1081 rounding mode is towards +infinity (or -infinity) then the two
1082 expressions will be rounded differently. */
1083 if (GET_CODE (op
) == MINUS
1084 && !HONOR_SIGNED_ZEROS (mode
)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1086 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
1088 if (GET_CODE (op
) == PLUS
1089 && !HONOR_SIGNED_ZEROS (mode
)
1090 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1092 /* (neg (plus A C)) is simplified to (minus -C A). */
1093 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
1094 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
1096 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
1098 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
1101 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1102 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1103 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1106 /* (neg (mult A B)) becomes (mult A (neg B)).
1107 This works even for floating-point values. */
1108 if (GET_CODE (op
) == MULT
1109 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1111 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1112 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1115 /* NEG commutes with ASHIFT since it is multiplication. Only do
1116 this if we can then eliminate the NEG (e.g., if the operand
1118 if (GET_CODE (op
) == ASHIFT
)
1120 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1122 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1125 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1126 C is equal to the width of MODE minus 1. */
1127 if (GET_CODE (op
) == ASHIFTRT
1128 && CONST_INT_P (XEXP (op
, 1))
1129 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1130 return simplify_gen_binary (LSHIFTRT
, mode
,
1131 XEXP (op
, 0), XEXP (op
, 1));
1133 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1134 C is equal to the width of MODE minus 1. */
1135 if (GET_CODE (op
) == LSHIFTRT
1136 && CONST_INT_P (XEXP (op
, 1))
1137 && INTVAL (XEXP (op
, 1)) == GET_MODE_UNIT_PRECISION (mode
) - 1)
1138 return simplify_gen_binary (ASHIFTRT
, mode
,
1139 XEXP (op
, 0), XEXP (op
, 1));
1141 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1142 if (GET_CODE (op
) == XOR
1143 && XEXP (op
, 1) == const1_rtx
1144 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1145 return plus_constant (mode
, XEXP (op
, 0), -1);
1147 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1148 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1149 if (GET_CODE (op
) == LT
1150 && XEXP (op
, 1) == const0_rtx
1151 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op
, 0)), &inner
))
1153 int_mode
= as_a
<scalar_int_mode
> (mode
);
1154 int isize
= GET_MODE_PRECISION (inner
);
1155 if (STORE_FLAG_VALUE
== 1)
1157 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1158 gen_int_shift_amount (inner
,
1160 if (int_mode
== inner
)
1162 if (GET_MODE_PRECISION (int_mode
) > isize
)
1163 return simplify_gen_unary (SIGN_EXTEND
, int_mode
, temp
, inner
);
1164 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1166 else if (STORE_FLAG_VALUE
== -1)
1168 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1169 gen_int_shift_amount (inner
,
1171 if (int_mode
== inner
)
1173 if (GET_MODE_PRECISION (int_mode
) > isize
)
1174 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, temp
, inner
);
1175 return simplify_gen_unary (TRUNCATE
, int_mode
, temp
, inner
);
1179 if (vec_series_p (op
, &base
, &step
))
1181 /* Only create a new series if we can simplify both parts. In other
1182 cases this isn't really a simplification, and it's not necessarily
1183 a win to replace a vector operation with a scalar operation. */
1184 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
1185 base
= simplify_unary_operation (NEG
, inner_mode
, base
, inner_mode
);
1188 step
= simplify_unary_operation (NEG
, inner_mode
,
1191 return gen_vec_series (mode
, base
, step
);
1197 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1198 with the umulXi3_highpart patterns. */
1199 if (GET_CODE (op
) == LSHIFTRT
1200 && GET_CODE (XEXP (op
, 0)) == MULT
)
1203 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1205 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1207 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1211 /* We can't handle truncation to a partial integer mode here
1212 because we don't know the real bitsize of the partial
1217 if (GET_MODE (op
) != VOIDmode
)
1219 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1224 /* If we know that the value is already truncated, we can
1225 replace the TRUNCATE with a SUBREG. */
1226 if (GET_MODE_NUNITS (mode
) == 1
1227 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1228 || truncated_to_mode (mode
, op
)))
1230 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1235 /* A truncate of a comparison can be replaced with a subreg if
1236 STORE_FLAG_VALUE permits. This is like the previous test,
1237 but it works even if the comparison is done in a mode larger
1238 than HOST_BITS_PER_WIDE_INT. */
1239 if (HWI_COMPUTABLE_MODE_P (mode
)
1240 && COMPARISON_P (op
)
1241 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1243 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1248 /* A truncate of a memory is just loading the low part of the memory
1249 if we are not changing the meaning of the address. */
1250 if (GET_CODE (op
) == MEM
1251 && !VECTOR_MODE_P (mode
)
1252 && !MEM_VOLATILE_P (op
)
1253 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1255 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1262 case FLOAT_TRUNCATE
:
1263 if (DECIMAL_FLOAT_MODE_P (mode
))
1266 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1267 if (GET_CODE (op
) == FLOAT_EXTEND
1268 && GET_MODE (XEXP (op
, 0)) == mode
)
1269 return XEXP (op
, 0);
1271 /* (float_truncate:SF (float_truncate:DF foo:XF))
1272 = (float_truncate:SF foo:XF).
1273 This may eliminate double rounding, so it is unsafe.
1275 (float_truncate:SF (float_extend:XF foo:DF))
1276 = (float_truncate:SF foo:DF).
1278 (float_truncate:DF (float_extend:XF foo:SF))
1279 = (float_extend:DF foo:SF). */
1280 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1281 && flag_unsafe_math_optimizations
)
1282 || GET_CODE (op
) == FLOAT_EXTEND
)
1283 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)))
1284 > GET_MODE_UNIT_SIZE (mode
)
1285 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1287 XEXP (op
, 0), mode
);
1289 /* (float_truncate (float x)) is (float x) */
1290 if ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1291 && (flag_unsafe_math_optimizations
1292 || exact_int_to_float_conversion_p (op
)))
1293 return simplify_gen_unary (GET_CODE (op
), mode
,
1295 GET_MODE (XEXP (op
, 0)));
1297 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1298 (OP:SF foo:SF) if OP is NEG or ABS. */
1299 if ((GET_CODE (op
) == ABS
1300 || GET_CODE (op
) == NEG
)
1301 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1302 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1303 return simplify_gen_unary (GET_CODE (op
), mode
,
1304 XEXP (XEXP (op
, 0), 0), mode
);
1306 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1307 is (float_truncate:SF x). */
1308 if (GET_CODE (op
) == SUBREG
1309 && subreg_lowpart_p (op
)
1310 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1311 return SUBREG_REG (op
);
1315 if (DECIMAL_FLOAT_MODE_P (mode
))
1318 /* (float_extend (float_extend x)) is (float_extend x)
1320 (float_extend (float x)) is (float x) assuming that double
1321 rounding can't happen.
1323 if (GET_CODE (op
) == FLOAT_EXTEND
1324 || ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1325 && exact_int_to_float_conversion_p (op
)))
1326 return simplify_gen_unary (GET_CODE (op
), mode
,
1328 GET_MODE (XEXP (op
, 0)));
1333 /* (abs (neg <foo>)) -> (abs <foo>) */
1334 if (GET_CODE (op
) == NEG
)
1335 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1336 GET_MODE (XEXP (op
, 0)));
1338 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1340 if (GET_MODE (op
) == VOIDmode
)
1343 /* If operand is something known to be positive, ignore the ABS. */
1344 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1345 || val_signbit_known_clear_p (GET_MODE (op
),
1346 nonzero_bits (op
, GET_MODE (op
))))
1349 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1350 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
1351 && (num_sign_bit_copies (op
, int_mode
)
1352 == GET_MODE_PRECISION (int_mode
)))
1353 return gen_rtx_NEG (int_mode
, op
);
1358 /* (ffs (*_extend <X>)) = (ffs <X>) */
1359 if (GET_CODE (op
) == SIGN_EXTEND
1360 || GET_CODE (op
) == ZERO_EXTEND
)
1361 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1362 GET_MODE (XEXP (op
, 0)));
1366 switch (GET_CODE (op
))
1370 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1371 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1372 GET_MODE (XEXP (op
, 0)));
1376 /* Rotations don't affect popcount. */
1377 if (!side_effects_p (XEXP (op
, 1)))
1378 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1379 GET_MODE (XEXP (op
, 0)));
1388 switch (GET_CODE (op
))
1394 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1395 GET_MODE (XEXP (op
, 0)));
1399 /* Rotations don't affect parity. */
1400 if (!side_effects_p (XEXP (op
, 1)))
1401 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1402 GET_MODE (XEXP (op
, 0)));
1411 /* (bswap (bswap x)) -> x. */
1412 if (GET_CODE (op
) == BSWAP
)
1413 return XEXP (op
, 0);
1417 /* (float (sign_extend <X>)) = (float <X>). */
1418 if (GET_CODE (op
) == SIGN_EXTEND
)
1419 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1420 GET_MODE (XEXP (op
, 0)));
1424 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1425 becomes just the MINUS if its mode is MODE. This allows
1426 folding switch statements on machines using casesi (such as
1428 if (GET_CODE (op
) == TRUNCATE
1429 && GET_MODE (XEXP (op
, 0)) == mode
1430 && GET_CODE (XEXP (op
, 0)) == MINUS
1431 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1432 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1433 return XEXP (op
, 0);
1435 /* Extending a widening multiplication should be canonicalized to
1436 a wider widening multiplication. */
1437 if (GET_CODE (op
) == MULT
)
1439 rtx lhs
= XEXP (op
, 0);
1440 rtx rhs
= XEXP (op
, 1);
1441 enum rtx_code lcode
= GET_CODE (lhs
);
1442 enum rtx_code rcode
= GET_CODE (rhs
);
1444 /* Widening multiplies usually extend both operands, but sometimes
1445 they use a shift to extract a portion of a register. */
1446 if ((lcode
== SIGN_EXTEND
1447 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1448 && (rcode
== SIGN_EXTEND
1449 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1451 machine_mode lmode
= GET_MODE (lhs
);
1452 machine_mode rmode
= GET_MODE (rhs
);
1455 if (lcode
== ASHIFTRT
)
1456 /* Number of bits not shifted off the end. */
1457 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1458 - INTVAL (XEXP (lhs
, 1)));
1459 else /* lcode == SIGN_EXTEND */
1460 /* Size of inner mode. */
1461 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1463 if (rcode
== ASHIFTRT
)
1464 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1465 - INTVAL (XEXP (rhs
, 1)));
1466 else /* rcode == SIGN_EXTEND */
1467 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1469 /* We can only widen multiplies if the result is mathematiclly
1470 equivalent. I.e. if overflow was impossible. */
1471 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1472 return simplify_gen_binary
1474 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1475 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1479 /* Check for a sign extension of a subreg of a promoted
1480 variable, where the promotion is sign-extended, and the
1481 target mode is the same as the variable's promotion. */
1482 if (GET_CODE (op
) == SUBREG
1483 && SUBREG_PROMOTED_VAR_P (op
)
1484 && SUBREG_PROMOTED_SIGNED_P (op
)
1485 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1487 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1492 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1493 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1494 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1496 gcc_assert (GET_MODE_UNIT_PRECISION (mode
)
1497 > GET_MODE_UNIT_PRECISION (GET_MODE (op
)));
1498 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1499 GET_MODE (XEXP (op
, 0)));
1502 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1503 is (sign_extend:M (subreg:O <X>)) if there is mode with
1504 GET_MODE_BITSIZE (N) - I bits.
1505 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1506 is similarly (zero_extend:M (subreg:O <X>)). */
1507 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1508 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1509 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1510 && CONST_INT_P (XEXP (op
, 1))
1511 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1512 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1513 GET_MODE_BITSIZE (op_mode
) > INTVAL (XEXP (op
, 1))))
1515 scalar_int_mode tmode
;
1516 gcc_assert (GET_MODE_BITSIZE (int_mode
)
1517 > GET_MODE_BITSIZE (op_mode
));
1518 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode
)
1519 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1522 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1524 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1525 ? SIGN_EXTEND
: ZERO_EXTEND
,
1526 int_mode
, inner
, tmode
);
1530 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1531 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1532 if (GET_CODE (op
) == LSHIFTRT
1533 && CONST_INT_P (XEXP (op
, 1))
1534 && XEXP (op
, 1) != const0_rtx
)
1535 return simplify_gen_unary (ZERO_EXTEND
, mode
, op
, GET_MODE (op
));
1537 #if defined(POINTERS_EXTEND_UNSIGNED)
1538 /* As we do not know which address space the pointer is referring to,
1539 we can do this only if the target does not support different pointer
1540 or address modes depending on the address space. */
1541 if (target_default_pointer_address_modes_p ()
1542 && ! POINTERS_EXTEND_UNSIGNED
1543 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1545 || (GET_CODE (op
) == SUBREG
1546 && REG_P (SUBREG_REG (op
))
1547 && REG_POINTER (SUBREG_REG (op
))
1548 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1549 && !targetm
.have_ptr_extend ())
1552 = convert_memory_address_addr_space_1 (Pmode
, op
,
1553 ADDR_SPACE_GENERIC
, false,
1562 /* Check for a zero extension of a subreg of a promoted
1563 variable, where the promotion is zero-extended, and the
1564 target mode is the same as the variable's promotion. */
1565 if (GET_CODE (op
) == SUBREG
1566 && SUBREG_PROMOTED_VAR_P (op
)
1567 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1568 && !paradoxical_subreg_p (mode
, GET_MODE (SUBREG_REG (op
))))
1570 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1575 /* Extending a widening multiplication should be canonicalized to
1576 a wider widening multiplication. */
1577 if (GET_CODE (op
) == MULT
)
1579 rtx lhs
= XEXP (op
, 0);
1580 rtx rhs
= XEXP (op
, 1);
1581 enum rtx_code lcode
= GET_CODE (lhs
);
1582 enum rtx_code rcode
= GET_CODE (rhs
);
1584 /* Widening multiplies usually extend both operands, but sometimes
1585 they use a shift to extract a portion of a register. */
1586 if ((lcode
== ZERO_EXTEND
1587 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1588 && (rcode
== ZERO_EXTEND
1589 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1591 machine_mode lmode
= GET_MODE (lhs
);
1592 machine_mode rmode
= GET_MODE (rhs
);
1595 if (lcode
== LSHIFTRT
)
1596 /* Number of bits not shifted off the end. */
1597 bits
= (GET_MODE_UNIT_PRECISION (lmode
)
1598 - INTVAL (XEXP (lhs
, 1)));
1599 else /* lcode == ZERO_EXTEND */
1600 /* Size of inner mode. */
1601 bits
= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1603 if (rcode
== LSHIFTRT
)
1604 bits
+= (GET_MODE_UNIT_PRECISION (rmode
)
1605 - INTVAL (XEXP (rhs
, 1)));
1606 else /* rcode == ZERO_EXTEND */
1607 bits
+= GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1609 /* We can only widen multiplies if the result is mathematiclly
1610 equivalent. I.e. if overflow was impossible. */
1611 if (bits
<= GET_MODE_UNIT_PRECISION (GET_MODE (op
)))
1612 return simplify_gen_binary
1614 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1615 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1619 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1620 if (GET_CODE (op
) == ZERO_EXTEND
)
1621 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1622 GET_MODE (XEXP (op
, 0)));
1624 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1625 is (zero_extend:M (subreg:O <X>)) if there is mode with
1626 GET_MODE_PRECISION (N) - I bits. */
1627 if (GET_CODE (op
) == LSHIFTRT
1628 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1629 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1630 && CONST_INT_P (XEXP (op
, 1))
1631 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1632 && (op_mode
= as_a
<scalar_int_mode
> (GET_MODE (op
)),
1633 GET_MODE_PRECISION (op_mode
) > INTVAL (XEXP (op
, 1))))
1635 scalar_int_mode tmode
;
1636 if (int_mode_for_size (GET_MODE_PRECISION (op_mode
)
1637 - INTVAL (XEXP (op
, 1)), 1).exists (&tmode
))
1640 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1642 return simplify_gen_unary (ZERO_EXTEND
, int_mode
,
1647 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1648 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1650 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1651 (and:SI (reg:SI) (const_int 63)). */
1652 if (partial_subreg_p (op
)
1653 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
1654 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &op0_mode
)
1655 && GET_MODE_PRECISION (op0_mode
) <= HOST_BITS_PER_WIDE_INT
1656 && GET_MODE_PRECISION (int_mode
) >= GET_MODE_PRECISION (op0_mode
)
1657 && subreg_lowpart_p (op
)
1658 && (nonzero_bits (SUBREG_REG (op
), op0_mode
)
1659 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1661 if (GET_MODE_PRECISION (int_mode
) == GET_MODE_PRECISION (op0_mode
))
1662 return SUBREG_REG (op
);
1663 return simplify_gen_unary (ZERO_EXTEND
, int_mode
, SUBREG_REG (op
),
1667 #if defined(POINTERS_EXTEND_UNSIGNED)
1668 /* As we do not know which address space the pointer is referring to,
1669 we can do this only if the target does not support different pointer
1670 or address modes depending on the address space. */
1671 if (target_default_pointer_address_modes_p ()
1672 && POINTERS_EXTEND_UNSIGNED
> 0
1673 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1675 || (GET_CODE (op
) == SUBREG
1676 && REG_P (SUBREG_REG (op
))
1677 && REG_POINTER (SUBREG_REG (op
))
1678 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1679 && !targetm
.have_ptr_extend ())
1682 = convert_memory_address_addr_space_1 (Pmode
, op
,
1683 ADDR_SPACE_GENERIC
, false,
1695 if (VECTOR_MODE_P (mode
) && vec_duplicate_p (op
, &elt
))
1697 /* Try applying the operator to ELT and see if that simplifies.
1698 We can duplicate the result if so.
1700 The reason we don't use simplify_gen_unary is that it isn't
1701 necessarily a win to convert things like:
1703 (neg:V (vec_duplicate:V (reg:S R)))
1707 (vec_duplicate:V (neg:S (reg:S R)))
1709 The first might be done entirely in vector registers while the
1710 second might need a move between register files. */
1711 temp
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1712 elt
, GET_MODE_INNER (GET_MODE (op
)));
1714 return gen_vec_duplicate (mode
, temp
);
1720 /* Try to compute the value of a unary operation CODE whose output mode is to
1721 be MODE with input operand OP whose mode was originally OP_MODE.
1722 Return zero if the value cannot be computed. */
1724 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1725 rtx op
, machine_mode op_mode
)
1727 scalar_int_mode result_mode
;
1729 if (code
== VEC_DUPLICATE
)
1731 gcc_assert (VECTOR_MODE_P (mode
));
1732 if (GET_MODE (op
) != VOIDmode
)
1734 if (!VECTOR_MODE_P (GET_MODE (op
)))
1735 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1737 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1740 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
))
1741 return gen_const_vec_duplicate (mode
, op
);
1742 if (GET_CODE (op
) == CONST_VECTOR
)
1744 unsigned int n_elts
= GET_MODE_NUNITS (mode
);
1745 unsigned int in_n_elts
= CONST_VECTOR_NUNITS (op
);
1746 gcc_assert (in_n_elts
< n_elts
);
1747 gcc_assert ((n_elts
% in_n_elts
) == 0);
1748 rtvec v
= rtvec_alloc (n_elts
);
1749 for (unsigned i
= 0; i
< n_elts
; i
++)
1750 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1751 return gen_rtx_CONST_VECTOR (mode
, v
);
1755 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1757 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
1758 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1759 machine_mode opmode
= GET_MODE (op
);
1760 int op_elt_size
= GET_MODE_UNIT_SIZE (opmode
);
1761 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1762 rtvec v
= rtvec_alloc (n_elts
);
1765 gcc_assert (op_n_elts
== n_elts
);
1766 for (i
= 0; i
< n_elts
; i
++)
1768 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1769 CONST_VECTOR_ELT (op
, i
),
1770 GET_MODE_INNER (opmode
));
1771 if (!x
|| !valid_for_const_vector_p (mode
, x
))
1773 RTVEC_ELT (v
, i
) = x
;
1775 return gen_rtx_CONST_VECTOR (mode
, v
);
1778 /* The order of these tests is critical so that, for example, we don't
1779 check the wrong mode (input vs. output) for a conversion operation,
1780 such as FIX. At some point, this should be simplified. */
1782 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1786 if (op_mode
== VOIDmode
)
1788 /* CONST_INT have VOIDmode as the mode. We assume that all
1789 the bits of the constant are significant, though, this is
1790 a dangerous assumption as many times CONST_INTs are
1791 created and used with garbage in the bits outside of the
1792 precision of the implied mode of the const_int. */
1793 op_mode
= MAX_MODE_INT
;
1796 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), SIGNED
);
1798 /* Avoid the folding if flag_signaling_nans is on and
1799 operand is a signaling NaN. */
1800 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1803 d
= real_value_truncate (mode
, d
);
1804 return const_double_from_real_value (d
, mode
);
1806 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1810 if (op_mode
== VOIDmode
)
1812 /* CONST_INT have VOIDmode as the mode. We assume that all
1813 the bits of the constant are significant, though, this is
1814 a dangerous assumption as many times CONST_INTs are
1815 created and used with garbage in the bits outside of the
1816 precision of the implied mode of the const_int. */
1817 op_mode
= MAX_MODE_INT
;
1820 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), UNSIGNED
);
1822 /* Avoid the folding if flag_signaling_nans is on and
1823 operand is a signaling NaN. */
1824 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1827 d
= real_value_truncate (mode
, d
);
1828 return const_double_from_real_value (d
, mode
);
1831 if (CONST_SCALAR_INT_P (op
) && is_a
<scalar_int_mode
> (mode
, &result_mode
))
1833 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1835 scalar_int_mode imode
= (op_mode
== VOIDmode
1837 : as_a
<scalar_int_mode
> (op_mode
));
1838 rtx_mode_t op0
= rtx_mode_t (op
, imode
);
1841 #if TARGET_SUPPORTS_WIDE_INT == 0
1842 /* This assert keeps the simplification from producing a result
1843 that cannot be represented in a CONST_DOUBLE but a lot of
1844 upstream callers expect that this function never fails to
1845 simplify something and so you if you added this to the test
1846 above the code would die later anyway. If this assert
1847 happens, you just need to make the port support wide int. */
1848 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1854 result
= wi::bit_not (op0
);
1858 result
= wi::neg (op0
);
1862 result
= wi::abs (op0
);
1866 result
= wi::shwi (wi::ffs (op0
), result_mode
);
1870 if (wi::ne_p (op0
, 0))
1871 int_value
= wi::clz (op0
);
1872 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1873 int_value
= GET_MODE_PRECISION (imode
);
1874 result
= wi::shwi (int_value
, result_mode
);
1878 result
= wi::shwi (wi::clrsb (op0
), result_mode
);
1882 if (wi::ne_p (op0
, 0))
1883 int_value
= wi::ctz (op0
);
1884 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1885 int_value
= GET_MODE_PRECISION (imode
);
1886 result
= wi::shwi (int_value
, result_mode
);
1890 result
= wi::shwi (wi::popcount (op0
), result_mode
);
1894 result
= wi::shwi (wi::parity (op0
), result_mode
);
1898 result
= wide_int (op0
).bswap ();
1903 result
= wide_int::from (op0
, width
, UNSIGNED
);
1907 result
= wide_int::from (op0
, width
, SIGNED
);
1915 return immed_wide_int_const (result
, result_mode
);
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);

      switch (code)
        {
        case SQRT:
          return 0;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);

      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the ABI of real_to_integer, but we check
         things before making this call.  */
      bool fail;

      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (*x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          wmax = wi::max_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmax, SIGNED);
          if (real_less (&t, x))
            return immed_wide_int_const (wmax, mode);

          /* Test against the signed lower bound.  */
          wmin = wi::min_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmin, SIGNED);
          if (real_less (x, &t))
            return immed_wide_int_const (wmin, mode);

          return immed_wide_int_const (real_to_integer (x, &fail, width),
                                       mode);

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          wmax = wi::max_value (width, UNSIGNED);
          real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
          if (real_less (&t, x))
            return immed_wide_int_const (wmax, mode);

          return immed_wide_int_const (real_to_integer (x, &fail, width),
                                       mode);

        default:
          gcc_unreachable ();
        }
    }
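  /* For example, in SImode FIX clamps 1.0e30 to (const_int 0x7fffffff)
     and -1.0e30 to (const_int -0x80000000), while a NaN operand folds to
     (const_int 0); UNSIGNED_FIX additionally folds negative values to
     zero.  */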
  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
        {
        case NEG:
          result = -const_poly_int_value (op);
          break;

        case NOT:
          result = ~const_poly_int_value (op);
          break;

        default:
          return NULL_RTX;
        }
      return immed_wide_int_const (result, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
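/* For example, in SImode (and (bswap X) (const_int 0xff)) becomes
   (bswap (and X (const_int 0xff000000))): both forms extract the most
   significant byte of X into the least significant byte of the result.  */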
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
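/* For example, (plus (plus X (const_int 4)) Y) is canonicalized here to
   (plus (plus X Y) (const_int 4)), which lets an outer constant fold
   against the inner one.  */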
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

static rtx
simplify_binary_operation_series (rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
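/* The transform above relies on the identity
   (vec_series B1 S1) op (vec_series B2 S2) == (vec_series (B1 op B2)
   (S1 op S2)) for element-wise operations; a vec_duplicate simply
   contributes a zero step.  It is applied only when both the base and
   the step operations themselves simplify.  */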
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;
  poly_int64 offset;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
          wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (int_mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
            {
              coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                            GET_MODE_PRECISION (int_mode));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);

              tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
              return (set_src_cost (tem, int_mode, speed)
                      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
            }
        }
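      /* For example, (plus (mult X (const_int 3)) X) becomes
         (mult X (const_int 4)), and (plus (ashift X (const_int 2)) X)
         becomes (mult X (const_int 5)); the result is kept only if it is
         no more expensive than the original expression.  */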
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == XOR
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
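      /* For example, in SImode (plus (xor X (const_int 0xc))
         (const_int 0x80000000)) becomes (xor X (const_int 0x8000000c)):
         adding the sign bit can only toggle the top bit, so it is
         equivalent to XORing with it, and the two XOR constants then
         combine.  */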
2354 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2355 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2356 && GET_CODE (op0
) == MULT
2357 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2361 in1
= XEXP (XEXP (op0
, 0), 0);
2362 in2
= XEXP (op0
, 1);
2363 return simplify_gen_binary (MINUS
, mode
, op1
,
2364 simplify_gen_binary (MULT
, mode
,
2368 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2369 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2371 if (COMPARISON_P (op0
)
2372 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2373 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2374 && (reversed
= reversed_comparison (op0
, mode
)))
2376 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2378 /* If one of the operands is a PLUS or a MINUS, see if we can
2379 simplify this by the associative law.
2380 Don't use the associative law for floating point.
2381 The inaccuracy makes it nonassociative,
2382 and subtle programs can break if operations are associated. */
2384 if (INTEGRAL_MODE_P (mode
)
2385 && (plus_minus_operand_p (op0
)
2386 || plus_minus_operand_p (op1
))
2387 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2390 /* Reassociate floating point addition only when the user
2391 specifies associative math operations. */
2392 if (FLOAT_MODE_P (mode
)
2393 && flag_associative_math
)
2395 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2400 /* Handle vector series. */
2401 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2403 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2410 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2411 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2412 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2413 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2415 rtx xop00
= XEXP (op0
, 0);
2416 rtx xop10
= XEXP (op1
, 0);
2418 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2421 if (REG_P (xop00
) && REG_P (xop10
)
2422 && REGNO (xop00
) == REGNO (xop10
)
2423 && GET_MODE (xop00
) == mode
2424 && GET_MODE (xop10
) == mode
2425 && GET_MODE_CLASS (mode
) == MODE_CC
)
2431 /* We can't assume x-x is 0 even with non-IEEE floating point,
2432 but since it is zero except in very strange circumstances, we
2433 will treat it as zero with -ffinite-math-only. */
2434 if (rtx_equal_p (trueop0
, trueop1
)
2435 && ! side_effects_p (op0
)
2436 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2437 return CONST0_RTX (mode
);
2439 /* Change subtraction from zero into negation. (0 - x) is the
2440 same as -x when x is NaN, infinite, or finite and nonzero.
2441 But if the mode has signed zeros, and does not round towards
2442 -infinity, then 0 - 0 is 0, not -0. */
2443 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2444 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
      /* (-1 - a) is ~a, unless the expression contains symbolic
         constants, in which case not retaining additions and
         subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
          && !contains_symbolic_reference_p (op1))
        return simplify_gen_unary (NOT, mode, op1, mode);
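      /* For example, (minus (const_int -1) X) becomes (not X): in two's
         complement, -1 - X and ~X are the same value.  */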
2453 /* Subtracting 0 has no effect unless the mode has signed zeros
2454 and supports rounding towards -infinity. In such a case,
2456 if (!(HONOR_SIGNED_ZEROS (mode
)
2457 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2458 && trueop1
== CONST0_RTX (mode
))
2461 /* See if this is something like X * C - X or vice versa or
2462 if the multiplication is written as a shift. If so, we can
2463 distribute and make a new multiply, shift, or maybe just
2464 have X (if C is 2 in the example above). But don't make
2465 something more expensive than we had before. */
2467 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2469 rtx lhs
= op0
, rhs
= op1
;
2471 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2472 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2474 if (GET_CODE (lhs
) == NEG
)
2476 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2477 lhs
= XEXP (lhs
, 0);
2479 else if (GET_CODE (lhs
) == MULT
2480 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2482 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2483 lhs
= XEXP (lhs
, 0);
2485 else if (GET_CODE (lhs
) == ASHIFT
2486 && CONST_INT_P (XEXP (lhs
, 1))
2487 && INTVAL (XEXP (lhs
, 1)) >= 0
2488 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2490 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2491 GET_MODE_PRECISION (int_mode
));
2492 lhs
= XEXP (lhs
, 0);
2495 if (GET_CODE (rhs
) == NEG
)
2497 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2498 rhs
= XEXP (rhs
, 0);
2500 else if (GET_CODE (rhs
) == MULT
2501 && CONST_INT_P (XEXP (rhs
, 1)))
2503 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2504 rhs
= XEXP (rhs
, 0);
2506 else if (GET_CODE (rhs
) == ASHIFT
2507 && CONST_INT_P (XEXP (rhs
, 1))
2508 && INTVAL (XEXP (rhs
, 1)) >= 0
2509 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2511 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2512 GET_MODE_PRECISION (int_mode
));
2513 negcoeff1
= -negcoeff1
;
2514 rhs
= XEXP (rhs
, 0);
2517 if (rtx_equal_p (lhs
, rhs
))
2519 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2521 bool speed
= optimize_function_for_speed_p (cfun
);
2523 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2525 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2526 return (set_src_cost (tem
, int_mode
, speed
)
2527 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2531 /* (a - (-b)) -> (a + b). True even for IEEE. */
2532 if (GET_CODE (op1
) == NEG
)
2533 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2535 /* (-x - c) may be simplified as (-c - x). */
2536 if (GET_CODE (op0
) == NEG
2537 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2539 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2541 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2544 if ((GET_CODE (op0
) == CONST
2545 || GET_CODE (op0
) == SYMBOL_REF
2546 || GET_CODE (op0
) == LABEL_REF
)
2547 && poly_int_rtx_p (op1
, &offset
))
2548 return plus_constant (mode
, op0
, trunc_int_for_mode (-offset
, mode
));
2550 /* Don't let a relocatable value get a negative coeff. */
2551 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2552 return simplify_gen_binary (PLUS
, mode
,
2554 neg_const_int (mode
, op1
));
      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
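      /* For example, with X = 0b1100 and Y = 0b1010:
         X - (X & Y) = 0b1100 - 0b1000 = 0b0100, which is exactly X & ~Y;
         the subtraction can never borrow because X & Y only contains bits
         that are also set in X.  */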
2573 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2574 by reversing the comparison code if valid. */
2575 if (STORE_FLAG_VALUE
== 1
2576 && trueop0
== const1_rtx
2577 && COMPARISON_P (op1
)
2578 && (reversed
= reversed_comparison (op1
, mode
)))
2581 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2582 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2583 && GET_CODE (op1
) == MULT
2584 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2588 in1
= XEXP (XEXP (op1
, 0), 0);
2589 in2
= XEXP (op1
, 1);
2590 return simplify_gen_binary (PLUS
, mode
,
2591 simplify_gen_binary (MULT
, mode
,
2596 /* Canonicalize (minus (neg A) (mult B C)) to
2597 (minus (mult (neg B) C) A). */
2598 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2599 && GET_CODE (op1
) == MULT
2600 && GET_CODE (op0
) == NEG
)
2604 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2605 in2
= XEXP (op1
, 1);
2606 return simplify_gen_binary (MINUS
, mode
,
2607 simplify_gen_binary (MULT
, mode
,
2612 /* If one of the operands is a PLUS or a MINUS, see if we can
2613 simplify this by the associative law. This will, for example,
2614 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2615 Don't use the associative law for floating point.
2616 The inaccuracy makes it nonassociative,
2617 and subtle programs can break if operations are associated. */
2619 if (INTEGRAL_MODE_P (mode
)
2620 && (plus_minus_operand_p (op0
)
2621 || plus_minus_operand_p (op1
))
2622 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2625 /* Handle vector series. */
2626 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2628 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2635 if (trueop1
== constm1_rtx
)
2636 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2638 if (GET_CODE (op0
) == NEG
)
2640 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2641 /* If op1 is a MULT as well and simplify_unary_operation
2642 just moved the NEG to the second operand, simplify_gen_binary
2643 below could through simplify_associative_operation move
2644 the NEG around again and recurse endlessly. */
2646 && GET_CODE (op1
) == MULT
2647 && GET_CODE (temp
) == MULT
2648 && XEXP (op1
, 0) == XEXP (temp
, 0)
2649 && GET_CODE (XEXP (temp
, 1)) == NEG
2650 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2653 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2655 if (GET_CODE (op1
) == NEG
)
2657 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2658 /* If op0 is a MULT as well and simplify_unary_operation
2659 just moved the NEG to the second operand, simplify_gen_binary
2660 below could through simplify_associative_operation move
2661 the NEG around again and recurse endlessly. */
2663 && GET_CODE (op0
) == MULT
2664 && GET_CODE (temp
) == MULT
2665 && XEXP (op0
, 0) == XEXP (temp
, 0)
2666 && GET_CODE (XEXP (temp
, 1)) == NEG
2667 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2670 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2673 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2674 x is NaN, since x * 0 is then also NaN. Nor is it valid
2675 when the mode has signed zeros, since multiplying a negative
2676 number by 0 will give -0, not 0. */
2677 if (!HONOR_NANS (mode
)
2678 && !HONOR_SIGNED_ZEROS (mode
)
2679 && trueop1
== CONST0_RTX (mode
)
2680 && ! side_effects_p (op0
))
2683 /* In IEEE floating point, x*1 is not equivalent to x for
2685 if (!HONOR_SNANS (mode
)
2686 && trueop1
== CONST1_RTX (mode
))
      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
        {
          val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
          if (val >= 0)
            return simplify_gen_binary (ASHIFT, mode, op0,
                                        gen_int_shift_amount (mode, val));
        }
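      /* For example, (mult X (const_int 8)) becomes
         (ashift X (const_int 3)).  */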
2698 /* x*2 is x+x and x*(-1) is -x */
2699 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2700 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2701 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2702 && GET_MODE (op0
) == mode
)
2704 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
2706 if (real_equal (d1
, &dconst2
))
2707 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2709 if (!HONOR_SNANS (mode
)
2710 && real_equal (d1
, &dconstm1
))
2711 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2714 /* Optimize -x * -x as x * x. */
2715 if (FLOAT_MODE_P (mode
)
2716 && GET_CODE (op0
) == NEG
2717 && GET_CODE (op1
) == NEG
2718 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2719 && !side_effects_p (XEXP (op0
, 0)))
2720 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2722 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2723 if (SCALAR_FLOAT_MODE_P (mode
)
2724 && GET_CODE (op0
) == ABS
2725 && GET_CODE (op1
) == ABS
2726 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2727 && !side_effects_p (XEXP (op0
, 0)))
2728 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2730 /* Reassociate multiplication, but for floating point MULTs
2731 only when the user specifies unsafe math optimizations. */
2732 if (! FLOAT_MODE_P (mode
)
2733 || flag_unsafe_math_optimizations
)
2735 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2742 if (trueop1
== CONST0_RTX (mode
))
2744 if (INTEGRAL_MODE_P (mode
)
2745 && trueop1
== CONSTM1_RTX (mode
)
2746 && !side_effects_p (op0
))
2748 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2750 /* A | (~A) -> -1 */
2751 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2752 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2753 && ! side_effects_p (op0
)
2754 && SCALAR_INT_MODE_P (mode
))
2757 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2758 if (CONST_INT_P (op1
)
2759 && HWI_COMPUTABLE_MODE_P (mode
)
2760 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2761 && !side_effects_p (op0
))
2764 /* Canonicalize (X & C1) | C2. */
2765 if (GET_CODE (op0
) == AND
2766 && CONST_INT_P (trueop1
)
2767 && CONST_INT_P (XEXP (op0
, 1)))
2769 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2770 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2771 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2773 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2775 && !side_effects_p (XEXP (op0
, 0)))
2778 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2779 if (((c1
|c2
) & mask
) == mask
)
2780 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2783 /* Convert (A & B) | A to A. */
2784 if (GET_CODE (op0
) == AND
2785 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2786 || rtx_equal_p (XEXP (op0
, 1), op1
))
2787 && ! side_effects_p (XEXP (op0
, 0))
2788 && ! side_effects_p (XEXP (op0
, 1)))
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */
      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_UNIT_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
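      /* For example, in SImode (ior (ashift X (const_int 24))
         (lshiftrt X (const_int 8))) becomes (rotate X (const_int 24)),
         since 24 + 8 equals the 32-bit precision.  */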
2814 /* Same, but for ashift that has been "simplified" to a wider mode
2815 by simplify_shift_const. */
2817 if (GET_CODE (opleft
) == SUBREG
2818 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
2819 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (opleft
)),
2821 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2822 && GET_CODE (opright
) == LSHIFTRT
2823 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2824 && known_eq (SUBREG_BYTE (opleft
), SUBREG_BYTE (XEXP (opright
, 0)))
2825 && GET_MODE_SIZE (int_mode
) < GET_MODE_SIZE (inner_mode
)
2826 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2827 SUBREG_REG (XEXP (opright
, 0)))
2828 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2829 && CONST_INT_P (XEXP (opright
, 1))
2830 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1))
2831 + INTVAL (XEXP (opright
, 1))
2832 == GET_MODE_PRECISION (int_mode
)))
2833 return gen_rtx_ROTATE (int_mode
, XEXP (opright
, 0),
2834 XEXP (SUBREG_REG (opleft
), 1));
2836 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2837 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2838 the PLUS does not affect any of the bits in OP1: then we can do
2839 the IOR as a PLUS and we can associate. This is valid if OP1
2840 can be safely shifted left C bits. */
2841 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2842 && GET_CODE (XEXP (op0
, 0)) == PLUS
2843 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2844 && CONST_INT_P (XEXP (op0
, 1))
2845 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2847 int count
= INTVAL (XEXP (op0
, 1));
2848 HOST_WIDE_INT mask
= UINTVAL (trueop1
) << count
;
2850 if (mask
>> count
== INTVAL (trueop1
)
2851 && trunc_int_for_mode (mask
, mode
) == mask
2852 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2853 return simplify_gen_binary (ASHIFTRT
, mode
,
2854 plus_constant (mode
, XEXP (op0
, 0),
2859 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2863 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2869 if (trueop1
== CONST0_RTX (mode
))
2871 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2872 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2873 if (rtx_equal_p (trueop0
, trueop1
)
2874 && ! side_effects_p (op0
)
2875 && GET_MODE_CLASS (mode
) != MODE_CC
)
2876 return CONST0_RTX (mode
);
2878 /* Canonicalize XOR of the most significant bit to PLUS. */
2879 if (CONST_SCALAR_INT_P (op1
)
2880 && mode_signbit_p (mode
, op1
))
2881 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2882 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2883 if (CONST_SCALAR_INT_P (op1
)
2884 && GET_CODE (op0
) == PLUS
2885 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2886 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2887 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2888 simplify_gen_binary (XOR
, mode
, op1
,
2891 /* If we are XORing two things that have no bits in common,
2892 convert them into an IOR. This helps to detect rotation encoded
2893 using those methods and possibly other simplifications. */
2895 if (HWI_COMPUTABLE_MODE_P (mode
)
2896 && (nonzero_bits (op0
, mode
)
2897 & nonzero_bits (op1
, mode
)) == 0)
2898 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2900 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2901 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2904 int num_negated
= 0;
2906 if (GET_CODE (op0
) == NOT
)
2907 num_negated
++, op0
= XEXP (op0
, 0);
2908 if (GET_CODE (op1
) == NOT
)
2909 num_negated
++, op1
= XEXP (op1
, 0);
2911 if (num_negated
== 2)
2912 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2913 else if (num_negated
== 1)
2914 return simplify_gen_unary (NOT
, mode
,
2915 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2919 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2920 correspond to a machine insn or result in further simplifications
2921 if B is a constant. */
2923 if (GET_CODE (op0
) == AND
2924 && rtx_equal_p (XEXP (op0
, 1), op1
)
2925 && ! side_effects_p (op1
))
2926 return simplify_gen_binary (AND
, mode
,
2927 simplify_gen_unary (NOT
, mode
,
2928 XEXP (op0
, 0), mode
),
2931 else if (GET_CODE (op0
) == AND
2932 && rtx_equal_p (XEXP (op0
, 0), op1
)
2933 && ! side_effects_p (op1
))
2934 return simplify_gen_binary (AND
, mode
,
2935 simplify_gen_unary (NOT
, mode
,
2936 XEXP (op0
, 1), mode
),
2939 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2940 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2941 out bits inverted twice and not set by C. Similarly, given
2942 (xor (and (xor A B) C) D), simplify without inverting C in
2943 the xor operand: (xor (and A C) (B&C)^D).
2945 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2946 && GET_CODE (XEXP (op0
, 0)) == XOR
2947 && CONST_INT_P (op1
)
2948 && CONST_INT_P (XEXP (op0
, 1))
2949 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2951 enum rtx_code op
= GET_CODE (op0
);
2952 rtx a
= XEXP (XEXP (op0
, 0), 0);
2953 rtx b
= XEXP (XEXP (op0
, 0), 1);
2954 rtx c
= XEXP (op0
, 1);
2956 HOST_WIDE_INT bval
= INTVAL (b
);
2957 HOST_WIDE_INT cval
= INTVAL (c
);
2958 HOST_WIDE_INT dval
= INTVAL (d
);
2959 HOST_WIDE_INT xcval
;
2966 return simplify_gen_binary (XOR
, mode
,
2967 simplify_gen_binary (op
, mode
, a
, c
),
2968 gen_int_mode ((bval
& xcval
) ^ dval
,
2972 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2973 we can transform like this:
2974 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2975 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2976 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2977 Attempt a few simplifications when B and C are both constants. */
2978 if (GET_CODE (op0
) == AND
2979 && CONST_INT_P (op1
)
2980 && CONST_INT_P (XEXP (op0
, 1)))
2982 rtx a
= XEXP (op0
, 0);
2983 rtx b
= XEXP (op0
, 1);
2985 HOST_WIDE_INT bval
= INTVAL (b
);
2986 HOST_WIDE_INT cval
= INTVAL (c
);
2988 /* Instead of computing ~A&C, we compute its negated value,
2989 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2990 optimize for sure. If it does not simplify, we still try
2991 to compute ~A&C below, but since that always allocates
2992 RTL, we don't try that before committing to returning a
2993 simplified expression. */
2994 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
2997 if ((~cval
& bval
) == 0)
2999 rtx na_c
= NULL_RTX
;
3001 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
3004 /* If ~A does not simplify, don't bother: we don't
3005 want to simplify 2 operations into 3, and if na_c
3006 were to simplify with na, n_na_c would have
3007 simplified as well. */
3008 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
3010 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
3013 /* Try to simplify ~A&C | ~B&C. */
3014 if (na_c
!= NULL_RTX
)
3015 return simplify_gen_binary (IOR
, mode
, na_c
,
3016 gen_int_mode (~bval
& cval
, mode
));
3020 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3021 if (n_na_c
== CONSTM1_RTX (mode
))
3023 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
3024 gen_int_mode (~cval
& bval
,
3026 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
3027 gen_int_mode (~bval
& cval
,
3033 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3034 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3035 machines, and also has shorter instruction path length. */
3036 if (GET_CODE (op0
) == AND
3037 && GET_CODE (XEXP (op0
, 0)) == XOR
3038 && CONST_INT_P (XEXP (op0
, 1))
3039 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), trueop1
))
3042 rtx b
= XEXP (XEXP (op0
, 0), 1);
3043 rtx c
= XEXP (op0
, 1);
3044 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3045 rtx a_nc
= simplify_gen_binary (AND
, mode
, a
, nc
);
3046 rtx bc
= simplify_gen_binary (AND
, mode
, b
, c
);
3047 return simplify_gen_binary (IOR
, mode
, a_nc
, bc
);
3049 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3050 else if (GET_CODE (op0
) == AND
3051 && GET_CODE (XEXP (op0
, 0)) == XOR
3052 && CONST_INT_P (XEXP (op0
, 1))
3053 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), trueop1
))
3055 rtx a
= XEXP (XEXP (op0
, 0), 0);
3057 rtx c
= XEXP (op0
, 1);
3058 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3059 rtx b_nc
= simplify_gen_binary (AND
, mode
, b
, nc
);
3060 rtx ac
= simplify_gen_binary (AND
, mode
, a
, c
);
3061 return simplify_gen_binary (IOR
, mode
, ac
, b_nc
);
3064 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3065 comparison if STORE_FLAG_VALUE is 1. */
3066 if (STORE_FLAG_VALUE
== 1
3067 && trueop1
== const1_rtx
3068 && COMPARISON_P (op0
)
3069 && (reversed
= reversed_comparison (op0
, mode
)))
3072 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3073 is (lt foo (const_int 0)), so we can perform the above
3074 simplification if STORE_FLAG_VALUE is 1. */
3076 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3077 && STORE_FLAG_VALUE
== 1
3078 && trueop1
== const1_rtx
3079 && GET_CODE (op0
) == LSHIFTRT
3080 && CONST_INT_P (XEXP (op0
, 1))
3081 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
3082 return gen_rtx_GE (int_mode
, XEXP (op0
, 0), const0_rtx
);
3084 /* (xor (comparison foo bar) (const_int sign-bit))
3085 when STORE_FLAG_VALUE is the sign bit. */
3086 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3087 && val_signbit_p (int_mode
, STORE_FLAG_VALUE
)
3088 && trueop1
== const_true_rtx
3089 && COMPARISON_P (op0
)
3090 && (reversed
= reversed_comparison (op0
, int_mode
)))
3093 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3097 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3103 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3105 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3107 if (HWI_COMPUTABLE_MODE_P (mode
))
3109 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3110 HOST_WIDE_INT nzop1
;
3111 if (CONST_INT_P (trueop1
))
3113 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3114 /* If we are turning off bits already known off in OP0, we need
3116 if ((nzop0
& ~val1
) == 0)
3119 nzop1
= nonzero_bits (trueop1
, mode
);
3120 /* If we are clearing all the nonzero bits, the result is zero. */
3121 if ((nzop1
& nzop0
) == 0
3122 && !side_effects_p (op0
) && !side_effects_p (op1
))
3123 return CONST0_RTX (mode
);
3125 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3126 && GET_MODE_CLASS (mode
) != MODE_CC
)
3129 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3130 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3131 && ! side_effects_p (op0
)
3132 && GET_MODE_CLASS (mode
) != MODE_CC
)
3133 return CONST0_RTX (mode
);
3135 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3136 there are no nonzero bits of C outside of X's mode. */
3137 if ((GET_CODE (op0
) == SIGN_EXTEND
3138 || GET_CODE (op0
) == ZERO_EXTEND
)
3139 && CONST_INT_P (trueop1
)
3140 && HWI_COMPUTABLE_MODE_P (mode
)
3141 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3142 & UINTVAL (trueop1
)) == 0)
3144 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3145 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3146 gen_int_mode (INTVAL (trueop1
),
3148 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3151 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3152 we might be able to further simplify the AND with X and potentially
3153 remove the truncation altogether. */
3154 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3156 rtx x
= XEXP (op0
, 0);
3157 machine_mode xmode
= GET_MODE (x
);
3158 tem
= simplify_gen_binary (AND
, xmode
, x
,
3159 gen_int_mode (INTVAL (trueop1
), xmode
));
3160 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3163 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3164 if (GET_CODE (op0
) == IOR
3165 && CONST_INT_P (trueop1
)
3166 && CONST_INT_P (XEXP (op0
, 1)))
3168 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3169 return simplify_gen_binary (IOR
, mode
,
3170 simplify_gen_binary (AND
, mode
,
3171 XEXP (op0
, 0), op1
),
3172 gen_int_mode (tmp
, mode
));
3175 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3176 insn (and may simplify more). */
3177 if (GET_CODE (op0
) == XOR
3178 && rtx_equal_p (XEXP (op0
, 0), op1
)
3179 && ! side_effects_p (op1
))
3180 return simplify_gen_binary (AND
, mode
,
3181 simplify_gen_unary (NOT
, mode
,
3182 XEXP (op0
, 1), mode
),
3185 if (GET_CODE (op0
) == XOR
3186 && rtx_equal_p (XEXP (op0
, 1), op1
)
3187 && ! side_effects_p (op1
))
3188 return simplify_gen_binary (AND
, mode
,
3189 simplify_gen_unary (NOT
, mode
,
3190 XEXP (op0
, 0), mode
),
3193 /* Similarly for (~(A ^ B)) & A. */
3194 if (GET_CODE (op0
) == NOT
3195 && GET_CODE (XEXP (op0
, 0)) == XOR
3196 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3197 && ! side_effects_p (op1
))
3198 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3200 if (GET_CODE (op0
) == NOT
3201 && GET_CODE (XEXP (op0
, 0)) == XOR
3202 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3203 && ! side_effects_p (op1
))
3204 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3206 /* Convert (A | B) & A to A. */
3207 if (GET_CODE (op0
) == IOR
3208 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3209 || rtx_equal_p (XEXP (op0
, 1), op1
))
3210 && ! side_effects_p (XEXP (op0
, 0))
3211 && ! side_effects_p (XEXP (op0
, 1)))
3214 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3215 ((A & N) + B) & M -> (A + B) & M
3216 Similarly if (N & M) == 0,
3217 ((A | N) + B) & M -> (A + B) & M
3218 and for - instead of + and/or ^ instead of |.
3219 Also, if (N & M) == 0, then
3220 (A +- N) & M -> A & M. */
3221 if (CONST_INT_P (trueop1
)
3222 && HWI_COMPUTABLE_MODE_P (mode
)
3223 && ~UINTVAL (trueop1
)
3224 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3225 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3230 pmop
[0] = XEXP (op0
, 0);
3231 pmop
[1] = XEXP (op0
, 1);
3233 if (CONST_INT_P (pmop
[1])
3234 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3235 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3237 for (which
= 0; which
< 2; which
++)
3240 switch (GET_CODE (tem
))
3243 if (CONST_INT_P (XEXP (tem
, 1))
3244 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3245 == UINTVAL (trueop1
))
3246 pmop
[which
] = XEXP (tem
, 0);
3250 if (CONST_INT_P (XEXP (tem
, 1))
3251 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3252 pmop
[which
] = XEXP (tem
, 0);
3259 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3261 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3263 return simplify_gen_binary (code
, mode
, tem
, op1
);
3267 /* (and X (ior (not X) Y) -> (and X Y) */
3268 if (GET_CODE (op1
) == IOR
3269 && GET_CODE (XEXP (op1
, 0)) == NOT
3270 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3271 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3273 /* (and (ior (not X) Y) X) -> (and X Y) */
3274 if (GET_CODE (op0
) == IOR
3275 && GET_CODE (XEXP (op0
, 0)) == NOT
3276 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3277 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3279 /* (and X (ior Y (not X)) -> (and X Y) */
3280 if (GET_CODE (op1
) == IOR
3281 && GET_CODE (XEXP (op1
, 1)) == NOT
3282 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3283 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3285 /* (and (ior Y (not X)) X) -> (and X Y) */
3286 if (GET_CODE (op0
) == IOR
3287 && GET_CODE (XEXP (op0
, 1)) == NOT
3288 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3289 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3291 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3295 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3301 /* 0/x is 0 (or x&0 if x has side-effects). */
3302 if (trueop0
== CONST0_RTX (mode
)
3303 && !cfun
->can_throw_non_call_exceptions
)
3305 if (side_effects_p (op1
))
3306 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3310 if (trueop1
== CONST1_RTX (mode
))
3312 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0,
                                    gen_int_shift_amount (mode, val));
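      /* For example, (udiv X (const_int 16)) becomes
         (lshiftrt X (const_int 4)); this is only valid for unsigned
         division.  */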
3324 /* Handle floating point and integers separately. */
3325 if (SCALAR_FLOAT_MODE_P (mode
))
3327 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3328 safe for modes with NaNs, since 0.0 / 0.0 will then be
3329 NaN rather than 0.0. Nor is it safe for modes with signed
3330 zeros, since dividing 0 by a negative number gives -0.0 */
3331 if (trueop0
== CONST0_RTX (mode
)
3332 && !HONOR_NANS (mode
)
3333 && !HONOR_SIGNED_ZEROS (mode
)
3334 && ! side_effects_p (op1
))
3337 if (trueop1
== CONST1_RTX (mode
)
3338 && !HONOR_SNANS (mode
))
3341 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3342 && trueop1
!= CONST0_RTX (mode
))
3344 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3347 if (real_equal (d1
, &dconstm1
)
3348 && !HONOR_SNANS (mode
))
3349 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3351 /* Change FP division by a constant into multiplication.
3352 Only do this with -freciprocal-math. */
3353 if (flag_reciprocal_math
3354 && !real_equal (d1
, &dconst0
))
3357 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3358 tem
= const_double_from_real_value (d
, mode
);
3359 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3363 else if (SCALAR_INT_MODE_P (mode
))
3365 /* 0/x is 0 (or x&0 if x has side-effects). */
3366 if (trueop0
== CONST0_RTX (mode
)
3367 && !cfun
->can_throw_non_call_exceptions
)
3369 if (side_effects_p (op1
))
3370 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3374 if (trueop1
== CONST1_RTX (mode
))
3376 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3381 if (trueop1
== constm1_rtx
)
3383 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3385 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3391 /* 0%x is 0 (or x&0 if x has side-effects). */
3392 if (trueop0
== CONST0_RTX (mode
))
3394 if (side_effects_p (op1
))
3395 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
      /* x%1 is 0 (or x&0 if x has side-effects).  */
3399 if (trueop1
== CONST1_RTX (mode
))
3401 if (side_effects_p (op0
))
3402 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3403 return CONST0_RTX (mode
);
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    gen_int_mode (INTVAL (op1) - 1, mode));
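      /* For example, (umod X (const_int 8)) becomes
         (and X (const_int 7)); this is only valid for unsigned
         modulus.  */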
3413 /* 0%x is 0 (or x&0 if x has side-effects). */
3414 if (trueop0
== CONST0_RTX (mode
))
3416 if (side_effects_p (op1
))
3417 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3420 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3421 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3423 if (side_effects_p (op0
))
3424 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3425 return CONST0_RTX (mode
);
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
                       GET_MODE_UNIT_PRECISION (mode) - 1))
        {
          int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
          rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
          return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                      mode, op0, new_amount_rtx);
        }
#endif
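      /* For example, in SImode (rotatert X (const_int 27)) is
         canonicalized to (rotate X (const_int 5)), and
         (rotate X (const_int 20)) to (rotatert X (const_int 12)).  */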
3449 if (trueop1
== CONST0_RTX (mode
))
3451 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3453 /* Rotating ~0 always results in ~0. */
3454 if (CONST_INT_P (trueop0
)
3455 && HWI_COMPUTABLE_MODE_P (mode
)
3456 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3457 && ! side_effects_p (op1
))
3463 scalar constants c1, c2
3464 size (M2) > size (M1)
3465 c1 == size (M2) - size (M1)
3467 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3471 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3473 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
3474 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3476 && CONST_INT_P (op1
)
3477 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3478 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
3480 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3481 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
3482 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3483 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
3484 && subreg_lowpart_p (op0
))
3486 rtx tmp
= gen_int_shift_amount
3487 (inner_mode
, INTVAL (XEXP (SUBREG_REG (op0
), 1)) + INTVAL (op1
));
3488 tmp
= simplify_gen_binary (code
, inner_mode
,
3489 XEXP (SUBREG_REG (op0
), 0),
3491 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
3494 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3496 val
= INTVAL (op1
) & (GET_MODE_UNIT_PRECISION (mode
) - 1);
3497 if (val
!= INTVAL (op1
))
3498 return simplify_gen_binary (code
, mode
, op0
,
3499 gen_int_shift_amount (mode
, val
));
3506 if (trueop1
== CONST0_RTX (mode
))
3508 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3510 goto canonicalize_shift
;
3513 if (trueop1
== CONST0_RTX (mode
))
3515 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3517 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3518 if (GET_CODE (op0
) == CLZ
3519 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op0
, 0)), &inner_mode
)
3520 && CONST_INT_P (trueop1
)
3521 && STORE_FLAG_VALUE
== 1
3522 && INTVAL (trueop1
) < GET_MODE_UNIT_PRECISION (mode
))
3524 unsigned HOST_WIDE_INT zero_val
= 0;
3526 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode
, zero_val
)
3527 && zero_val
== GET_MODE_PRECISION (inner_mode
)
3528 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3529 return simplify_gen_relational (EQ
, mode
, inner_mode
,
3530 XEXP (op0
, 0), const0_rtx
);
3532 goto canonicalize_shift
;
3535 if (HWI_COMPUTABLE_MODE_P (mode
)
3536 && mode_signbit_p (mode
, trueop1
)
3537 && ! side_effects_p (op0
))
3539 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3541 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3547 if (HWI_COMPUTABLE_MODE_P (mode
)
3548 && CONST_INT_P (trueop1
)
3549 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3550 && ! side_effects_p (op0
))
3552 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3554 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3560 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3562 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3564 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3570 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3572 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3574 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3587 /* ??? There are simplifications that can be done. */
3591 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
3592 return gen_vec_duplicate (mode
, op0
);
3593 if (valid_for_const_vector_p (mode
, op0
)
3594 && valid_for_const_vector_p (mode
, op1
))
3595 return gen_const_vec_series (mode
, op0
, op1
);
3599 if (!VECTOR_MODE_P (mode
))
3601 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3602 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3603 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3604 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3605 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3607 if (vec_duplicate_p (trueop0
, &elt0
))
3610 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3611 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3614 /* Extract a scalar element from a nested VEC_SELECT expression
3615 (with optional nested VEC_CONCAT expression). Some targets
3616 (i386) extract scalar element from a vector using chain of
3617 nested VEC_SELECT expressions. When input operand is a memory
3618 operand, this operation can be simplified to a simple scalar
3619 load from an offseted memory address. */
3620 if (GET_CODE (trueop0
) == VEC_SELECT
)
3622 rtx op0
= XEXP (trueop0
, 0);
3623 rtx op1
= XEXP (trueop0
, 1);
3625 int n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3627 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3633 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3634 gcc_assert (i
< n_elts
);
3636 /* Select element, pointed by nested selector. */
3637 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3639 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3640 if (GET_CODE (op0
) == VEC_CONCAT
)
3642 rtx op00
= XEXP (op0
, 0);
3643 rtx op01
= XEXP (op0
, 1);
3645 machine_mode mode00
, mode01
;
3646 int n_elts00
, n_elts01
;
3648 mode00
= GET_MODE (op00
);
3649 mode01
= GET_MODE (op01
);
3651 /* Find out number of elements of each operand. */
3652 n_elts00
= GET_MODE_NUNITS (mode00
);
3653 n_elts01
= GET_MODE_NUNITS (mode01
);
3655 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3657 /* Select correct operand of VEC_CONCAT
3658 and adjust selector. */
3659 if (elem
< n_elts01
)
3670 vec
= rtvec_alloc (1);
3671 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3673 tmp
= gen_rtx_fmt_ee (code
, mode
,
3674 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3680 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3681 gcc_assert (GET_MODE_INNER (mode
)
3682 == GET_MODE_INNER (GET_MODE (trueop0
)));
3683 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3685 if (vec_duplicate_p (trueop0
, &elt0
))
3686 /* It doesn't matter which elements are selected by trueop1,
3687 because they are all the same. */
3688 return gen_vec_duplicate (mode
, elt0
);
3690 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3692 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
3693 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3694 rtvec v
= rtvec_alloc (n_elts
);
3697 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3698 for (i
= 0; i
< n_elts
; i
++)
3700 rtx x
= XVECEXP (trueop1
, 0, i
);
3702 gcc_assert (CONST_INT_P (x
));
3703 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3707 return gen_rtx_CONST_VECTOR (mode
, v
);
3710 /* Recognize the identity. */
3711 if (GET_MODE (trueop0
) == mode
)
3713 bool maybe_ident
= true;
3714 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3716 rtx j
= XVECEXP (trueop1
, 0, i
);
3717 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3719 maybe_ident
= false;
3727 /* If we build {a,b} then permute it, build the result directly. */
3728 if (XVECLEN (trueop1
, 0) == 2
3729 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3730 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3731 && GET_CODE (trueop0
) == VEC_CONCAT
3732 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3733 && GET_MODE (XEXP (trueop0
, 0)) == mode
3734 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3735 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3737 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3738 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3741 gcc_assert (i0
< 4 && i1
< 4);
3742 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3743 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3745 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3748 if (XVECLEN (trueop1
, 0) == 2
3749 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3750 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3751 && GET_CODE (trueop0
) == VEC_CONCAT
3752 && GET_MODE (trueop0
) == mode
)
3754 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3755 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3758 gcc_assert (i0
< 2 && i1
< 2);
3759 subop0
= XEXP (trueop0
, i0
);
3760 subop1
= XEXP (trueop0
, i1
);
3762 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3765 /* If we select one half of a vec_concat, return that. */
3766 if (GET_CODE (trueop0
) == VEC_CONCAT
3767 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3769 rtx subop0
= XEXP (trueop0
, 0);
3770 rtx subop1
= XEXP (trueop0
, 1);
3771 machine_mode mode0
= GET_MODE (subop0
);
3772 machine_mode mode1
= GET_MODE (subop1
);
3773 int l0
= GET_MODE_NUNITS (mode0
);
3774 int l1
= GET_MODE_NUNITS (mode1
);
3775 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3776 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3778 bool success
= true;
3779 for (int i
= 1; i
< l0
; ++i
)
3781 rtx j
= XVECEXP (trueop1
, 0, i
);
3782 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3791 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3793 bool success
= true;
3794 for (int i
= 1; i
< l1
; ++i
)
3796 rtx j
= XVECEXP (trueop1
, 0, i
);
3797 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3809 if (XVECLEN (trueop1
, 0) == 1
3810 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3811 && GET_CODE (trueop0
) == VEC_CONCAT
)
3814 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3816 /* Try to find the element in the VEC_CONCAT. */
3817 while (GET_MODE (vec
) != mode
3818 && GET_CODE (vec
) == VEC_CONCAT
)
3820 HOST_WIDE_INT vec_size
;
3822 if (CONST_INT_P (XEXP (vec
, 0)))
3824 /* vec_concat of two const_ints doesn't make sense with
3825 respect to modes. */
3826 if (CONST_INT_P (XEXP (vec
, 1)))
3829 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3830 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3833 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3835 if (offset
< vec_size
)
3836 vec
= XEXP (vec
, 0);
3840 vec
= XEXP (vec
, 1);
3842 vec
= avoid_constant_pool_reference (vec
);
3845 if (GET_MODE (vec
) == mode
)
3849 /* If we select elements in a vec_merge that all come from the same
3850 operand, select from that operand directly. */
3851 if (GET_CODE (op0
) == VEC_MERGE
)
3853 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3854 if (CONST_INT_P (trueop02
))
3856 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3857 bool all_operand0
= true;
3858 bool all_operand1
= true;
3859 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3861 rtx j
= XVECEXP (trueop1
, 0, i
);
3862 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
3863 all_operand1
= false;
3865 all_operand0
= false;
3867 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3868 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3869 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3870 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
	  /* If we have two nested selects that are inverses of each
	     other, replace them with the source operand.  */
3876 if (GET_CODE (trueop0
) == VEC_SELECT
3877 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3879 rtx op0_subop1
= XEXP (trueop0
, 1);
3880 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3881 gcc_assert (XVECLEN (trueop1
, 0) == GET_MODE_NUNITS (mode
));
	      /* Apply the outer ordering vector to the inner one.  (The inner
		 ordering vector is expressly permitted to be of a different
		 length than the outer one.)  If the result is { 0, 1, ..., n-1 }
		 then the two VEC_SELECTs cancel.  */
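	      /* Illustrative example: with the inner select reordering a
		 two-element vector and the outer select undoing it,
		   (vec_select:V2SI
		     (vec_select:V2SI (reg:V2SI x)
				      (parallel [(const_int 1) (const_int 0)]))
		     (parallel [(const_int 1) (const_int 0)]))
		 the composed ordering is { 0, 1 }, so the whole expression
		 reduces to (reg:V2SI x).  */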
3887 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3889 rtx x
= XVECEXP (trueop1
, 0, i
);
3890 if (!CONST_INT_P (x
))
3892 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3893 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3896 return XEXP (trueop0
, 0);
3902 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3903 ? GET_MODE (trueop0
)
3904 : GET_MODE_INNER (mode
));
3905 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3906 ? GET_MODE (trueop1
)
3907 : GET_MODE_INNER (mode
));
3909 gcc_assert (VECTOR_MODE_P (mode
));
3910 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3911 == GET_MODE_SIZE (mode
));
3913 if (VECTOR_MODE_P (op0_mode
))
3914 gcc_assert (GET_MODE_INNER (mode
)
3915 == GET_MODE_INNER (op0_mode
));
3917 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3919 if (VECTOR_MODE_P (op1_mode
))
3920 gcc_assert (GET_MODE_INNER (mode
)
3921 == GET_MODE_INNER (op1_mode
));
3923 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3925 if ((GET_CODE (trueop0
) == CONST_VECTOR
3926 || CONST_SCALAR_INT_P (trueop0
)
3927 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3928 && (GET_CODE (trueop1
) == CONST_VECTOR
3929 || CONST_SCALAR_INT_P (trueop1
)
3930 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3932 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3933 unsigned in_n_elts
= GET_MODE_NUNITS (op0_mode
);
3934 rtvec v
= rtvec_alloc (n_elts
);
3936 for (i
= 0; i
< n_elts
; i
++)
3940 if (!VECTOR_MODE_P (op0_mode
))
3941 RTVEC_ELT (v
, i
) = trueop0
;
3943 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3947 if (!VECTOR_MODE_P (op1_mode
))
3948 RTVEC_ELT (v
, i
) = trueop1
;
3950 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3955 return gen_rtx_CONST_VECTOR (mode
, v
);
      /* Try to merge two VEC_SELECTs from the same vector into a single one.
	 Restrict the transformation to avoid generating a VEC_SELECT with a
	 mode unrelated to its operand.  */
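      /* Illustrative example (modes chosen arbitrarily):
	   (vec_concat:V4SI
	     (vec_select:V2SI (reg:V4SI x) (parallel [(const_int 0) (const_int 2)]))
	     (vec_select:V2SI (reg:V4SI x) (parallel [(const_int 1) (const_int 3)])))
	 becomes a single
	   (vec_select:V4SI (reg:V4SI x)
			    (parallel [(const_int 0) (const_int 2)
				       (const_int 1) (const_int 3)])).  */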
3961 if (GET_CODE (trueop0
) == VEC_SELECT
3962 && GET_CODE (trueop1
) == VEC_SELECT
3963 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3964 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3966 rtx par0
= XEXP (trueop0
, 1);
3967 rtx par1
= XEXP (trueop1
, 1);
3968 int len0
= XVECLEN (par0
, 0);
3969 int len1
= XVECLEN (par1
, 0);
3970 rtvec vec
= rtvec_alloc (len0
+ len1
);
3971 for (int i
= 0; i
< len0
; i
++)
3972 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3973 for (int i
= 0; i
< len1
; i
++)
3974 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3975 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3976 gen_rtx_PARALLEL (VOIDmode
, vec
));
3985 if (mode
== GET_MODE (op0
)
3986 && mode
== GET_MODE (op1
)
3987 && vec_duplicate_p (op0
, &elt0
)
3988 && vec_duplicate_p (op1
, &elt1
))
      /* Try applying the operator to ELT and see if that simplifies.
	 We can duplicate the result if so.

	 The reason we don't use simplify_gen_binary is that it isn't
	 necessarily a win to convert things like:

	   (plus:V (vec_duplicate:V (reg:S R1))
		   (vec_duplicate:V (reg:S R2)))

	 to:

	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))

	 The first might be done entirely in vector registers while the
	 second might need a move between register files.  */
4005 tem
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4008 return gen_vec_duplicate (mode
, tem
);
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
4018 if (VECTOR_MODE_P (mode
)
4019 && code
!= VEC_CONCAT
4020 && GET_CODE (op0
) == CONST_VECTOR
4021 && GET_CODE (op1
) == CONST_VECTOR
)
4023 unsigned int n_elts
= CONST_VECTOR_NUNITS (op0
);
4024 gcc_assert (n_elts
== (unsigned int) CONST_VECTOR_NUNITS (op1
));
4025 gcc_assert (n_elts
== GET_MODE_NUNITS (mode
));
4026 rtvec v
= rtvec_alloc (n_elts
);
4029 for (i
= 0; i
< n_elts
; i
++)
4031 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4032 CONST_VECTOR_ELT (op0
, i
),
4033 CONST_VECTOR_ELT (op1
, i
));
4034 if (!x
|| !valid_for_const_vector_p (mode
, x
))
4036 RTVEC_ELT (v
, i
) = x
;
4039 return gen_rtx_CONST_VECTOR (mode
, v
);
4042 if (VECTOR_MODE_P (mode
)
4043 && code
== VEC_CONCAT
4044 && (CONST_SCALAR_INT_P (op0
)
4045 || CONST_FIXED_P (op0
)
4046 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4047 && (CONST_SCALAR_INT_P (op1
)
4048 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4049 || CONST_FIXED_P (op1
)))
4051 unsigned n_elts
= GET_MODE_NUNITS (mode
);
4052 rtvec v
= rtvec_alloc (n_elts
);
4054 gcc_assert (n_elts
>= 2);
4057 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4058 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4060 RTVEC_ELT (v
, 0) = op0
;
4061 RTVEC_ELT (v
, 1) = op1
;
4065 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
4066 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
4069 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4070 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4071 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4073 for (i
= 0; i
< op0_n_elts
; ++i
)
4074 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op0
, i
);
4075 for (i
= 0; i
< op1_n_elts
; ++i
)
4076 RTVEC_ELT (v
, op0_n_elts
+i
) = CONST_VECTOR_ELT (op1
, i
);
4079 return gen_rtx_CONST_VECTOR (mode
, v
);
4082 if (SCALAR_FLOAT_MODE_P (mode
)
4083 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4084 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4085 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4096 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4098 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4100 for (i
= 0; i
< 4; i
++)
4117 real_from_target (&r
, tmp0
, mode
);
4118 return const_double_from_real_value (r
, mode
);
4122 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4123 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4126 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4127 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4129 if (HONOR_SNANS (mode
)
4130 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4131 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4134 real_convert (&f0
, mode
, opr0
);
4135 real_convert (&f1
, mode
, opr1
);
4138 && real_equal (&f1
, &dconst0
)
4139 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4142 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4143 && flag_trapping_math
4144 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4146 int s0
= REAL_VALUE_NEGATIVE (f0
);
4147 int s1
= REAL_VALUE_NEGATIVE (f1
);
4152 /* Inf + -Inf = NaN plus exception. */
4157 /* Inf - Inf = NaN plus exception. */
4162 /* Inf / Inf = NaN plus exception. */
4169 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4170 && flag_trapping_math
4171 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4172 || (REAL_VALUE_ISINF (f1
)
4173 && real_equal (&f0
, &dconst0
))))
4174 /* Inf * 0 = NaN plus exception. */
4177 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4179 real_convert (&result
, mode
, &value
);
4181 /* Don't constant fold this floating point operation if
4182 the result has overflowed and flag_trapping_math. */
4184 if (flag_trapping_math
4185 && MODE_HAS_INFINITIES (mode
)
4186 && REAL_VALUE_ISINF (result
)
4187 && !REAL_VALUE_ISINF (f0
)
4188 && !REAL_VALUE_ISINF (f1
))
4189 /* Overflow plus exception. */
4192 /* Don't constant fold this floating point operation if the
4193 result may dependent upon the run-time rounding mode and
4194 flag_rounding_math is set, or if GCC's software emulation
4195 is unable to accurately represent the result. */
4197 if ((flag_rounding_math
4198 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4199 && (inexact
|| !real_identical (&result
, &value
)))
4202 return const_double_from_real_value (result
, mode
);
  /* We can fold some multi-word operations.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
      rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, and so if you added this to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
#endif
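      /* The fragments below dispatch on CODE to the matching wide-int
	 operation (wi::add, wi::sub, wi::mul, the division, shift and
	 rotate helpers, and so on) and then rebuild a constant of
	 INT_MODE with immed_wide_int_const.  For example, adding two
	 DImode integer constants on a 32-bit host is folded here in
	 extended precision rather than in plain HOST_WIDE_INT
	 arithmetic.  (Illustrative summary of the surrounding cases.)  */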
4229 result
= wi::sub (pop0
, pop1
);
4233 result
= wi::add (pop0
, pop1
);
4237 result
= wi::mul (pop0
, pop1
);
4241 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4247 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4253 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4259 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4265 result
= wi::bit_and (pop0
, pop1
);
4269 result
= wi::bit_or (pop0
, pop1
);
4273 result
= wi::bit_xor (pop0
, pop1
);
4277 result
= wi::smin (pop0
, pop1
);
4281 result
= wi::smax (pop0
, pop1
);
4285 result
= wi::umin (pop0
, pop1
);
4289 result
= wi::umax (pop0
, pop1
);
4296 wide_int wop1
= pop1
;
4297 if (SHIFT_COUNT_TRUNCATED
)
4298 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
4299 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
4305 result
= wi::lrshift (pop0
, wop1
);
4309 result
= wi::arshift (pop0
, wop1
);
4313 result
= wi::lshift (pop0
, wop1
);
4324 if (wi::neg_p (pop1
))
4330 result
= wi::lrotate (pop0
, pop1
);
4334 result
= wi::rrotate (pop0
, pop1
);
4345 return immed_wide_int_const (result
, int_mode
);
4348 /* Handle polynomial integers. */
4349 if (NUM_POLY_INT_COEFFS
> 1
4350 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
4351 && poly_int_rtx_p (op0
)
4352 && poly_int_rtx_p (op1
))
4354 poly_wide_int result
;
4358 result
= wi::to_poly_wide (op0
, mode
) + wi::to_poly_wide (op1
, mode
);
4362 result
= wi::to_poly_wide (op0
, mode
) - wi::to_poly_wide (op1
, mode
);
4366 if (CONST_SCALAR_INT_P (op1
))
4367 result
= wi::to_poly_wide (op0
, mode
) * rtx_mode_t (op1
, mode
);
4373 if (CONST_SCALAR_INT_P (op1
))
4375 wide_int shift
= rtx_mode_t (op1
, mode
);
4376 if (SHIFT_COUNT_TRUNCATED
)
4377 shift
= wi::umod_trunc (shift
, GET_MODE_PRECISION (int_mode
));
4378 else if (wi::geu_p (shift
, GET_MODE_PRECISION (int_mode
)))
4380 result
= wi::to_poly_wide (op0
, mode
) << shift
;
4387 if (!CONST_SCALAR_INT_P (op1
)
4388 || !can_ior_p (wi::to_poly_wide (op0
, mode
),
4389 rtx_mode_t (op1
, mode
), &result
))
4396 return immed_wide_int_const (result
, int_mode
);
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}

/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */
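/* For illustration, the expansion loop turns
     (plus:SI (plus:SI (reg:SI a) (const_int 4))
	      (minus:SI (reg:SI b) (reg:SI a)))
   into the operand list { a, +4, b, -a }; the a/-a pair folds to zero in
   the pairwise simplification loop, and the result is rebuilt as roughly
   (plus:SI (reg:SI b) (const_int 4)).  (Hypothetical registers, SImode
   chosen arbitrarily.)  */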
4434 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4437 struct simplify_plus_minus_op_data
4444 int changed
, n_constants
, canonicalized
= 0;
4447 memset (ops
, 0, sizeof ops
);
4449 /* Set up the two operands and then expand them until nothing has been
4450 changed. If we run out of room in our array, give up; this should
4451 almost never happen. */
4456 ops
[1].neg
= (code
== MINUS
);
4463 for (i
= 0; i
< n_ops
; i
++)
4465 rtx this_op
= ops
[i
].op
;
4466 int this_neg
= ops
[i
].neg
;
4467 enum rtx_code this_code
= GET_CODE (this_op
);
4473 if (n_ops
== ARRAY_SIZE (ops
))
4476 ops
[n_ops
].op
= XEXP (this_op
, 1);
4477 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4480 ops
[i
].op
= XEXP (this_op
, 0);
4482 /* If this operand was negated then we will potentially
4483 canonicalize the expression. Similarly if we don't
4484 place the operands adjacent we're re-ordering the
4485 expression and thus might be performing a
4486 canonicalization. Ignore register re-ordering.
4487 ??? It might be better to shuffle the ops array here,
4488 but then (plus (plus (A, B), plus (C, D))) wouldn't
4489 be seen as non-canonical. */
4492 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
4497 ops
[i
].op
= XEXP (this_op
, 0);
4498 ops
[i
].neg
= ! this_neg
;
4504 if (n_ops
!= ARRAY_SIZE (ops
)
4505 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4506 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4507 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4509 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4510 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4511 ops
[n_ops
].neg
= this_neg
;
4519 /* ~a -> (-a - 1) */
4520 if (n_ops
!= ARRAY_SIZE (ops
))
4522 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4523 ops
[n_ops
++].neg
= this_neg
;
4524 ops
[i
].op
= XEXP (this_op
, 0);
4525 ops
[i
].neg
= !this_neg
;
4535 ops
[i
].op
= neg_const_int (mode
, this_op
);
4549 if (n_constants
> 1)
4552 gcc_assert (n_ops
>= 2);
4554 /* If we only have two operands, we can avoid the loops. */
4557 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4560 /* Get the two operands. Be careful with the order, especially for
4561 the cases where code == MINUS. */
4562 if (ops
[0].neg
&& ops
[1].neg
)
4564 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4567 else if (ops
[0].neg
)
4578 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4581 /* Now simplify each pair of operands until nothing changes. */
4584 /* Insertion sort is good enough for a small array. */
4585 for (i
= 1; i
< n_ops
; i
++)
4587 struct simplify_plus_minus_op_data save
;
4591 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
4594 /* Just swapping registers doesn't count as canonicalization. */
4600 ops
[j
+ 1] = ops
[j
];
4602 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
4607 for (i
= n_ops
- 1; i
> 0; i
--)
4608 for (j
= i
- 1; j
>= 0; j
--)
4610 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4611 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4613 if (lhs
!= 0 && rhs
!= 0)
4615 enum rtx_code ncode
= PLUS
;
4621 std::swap (lhs
, rhs
);
4623 else if (swap_commutative_operands_p (lhs
, rhs
))
4624 std::swap (lhs
, rhs
);
4626 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4627 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4629 rtx tem_lhs
, tem_rhs
;
4631 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4632 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4633 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
4636 if (tem
&& !CONSTANT_P (tem
))
4637 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4640 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
		  /* Reject "simplifications" that just wrap the two
		     arguments in a CONST.  Failure to do so can result
		     in infinite recursion with simplify_binary_operation
		     when it calls us to simplify CONST operations.
		     Also, if we find such a simplification, don't try
		     any more combinations with this rhs:  We must have
		     something like symbol+offset, ie. one of the
		     trivial CONST expressions we handle later.  */
		  if (GET_CODE (tem) == CONST
		      && GET_CODE (XEXP (tem, 0)) == ncode
		      && XEXP (XEXP (tem, 0), 0) == lhs
		      && XEXP (XEXP (tem, 0), 1) == rhs)
4658 if (GET_CODE (tem
) == NEG
)
4659 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4660 if (CONST_INT_P (tem
) && lneg
)
4661 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4665 ops
[j
].op
= NULL_RTX
;
4675 /* Pack all the operands to the lower-numbered entries. */
4676 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4685 /* If nothing changed, check that rematerialization of rtl instructions
4686 is still required. */
4689 /* Perform rematerialization if only all operands are registers and
4690 all operations are PLUS. */
4691 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4692 around rs6000 and how it uses the CA register. See PR67145. */
4693 for (i
= 0; i
< n_ops
; i
++)
4695 || !REG_P (ops
[i
].op
)
4696 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
4697 && fixed_regs
[REGNO (ops
[i
].op
)]
4698 && !global_regs
[REGNO (ops
[i
].op
)]
4699 && ops
[i
].op
!= frame_pointer_rtx
4700 && ops
[i
].op
!= arg_pointer_rtx
4701 && ops
[i
].op
!= stack_pointer_rtx
))
4706 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4708 && CONST_INT_P (ops
[1].op
)
4709 && CONSTANT_P (ops
[0].op
)
4711 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4713 /* We suppressed creation of trivial CONST expressions in the
4714 combination loop to avoid recursion. Create one manually now.
4715 The combination loop should have ensured that there is exactly
4716 one CONST_INT, and the sort will have ensured that it is last
4717 in the array and that any other constant will be next-to-last. */
4720 && CONST_INT_P (ops
[n_ops
- 1].op
)
4721 && CONSTANT_P (ops
[n_ops
- 2].op
))
4723 rtx value
= ops
[n_ops
- 1].op
;
4724 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4725 value
= neg_const_int (mode
, value
);
4726 if (CONST_INT_P (value
))
4728 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4734 /* Put a non-negated operand first, if possible. */
4736 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4739 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4748 /* Now make the result by performing the requested operations. */
4751 for (i
= 1; i
< n_ops
; i
++)
4752 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4753 mode
, result
, ops
[i
].op
);
4758 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4760 plus_minus_operand_p (const_rtx x
)
4762 return GET_CODE (x
) == PLUS
4763 || GET_CODE (x
) == MINUS
4764 || (GET_CODE (x
) == CONST
4765 && GET_CODE (XEXP (x
, 0)) == PLUS
4766 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4767 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;
4784 if (cmp_mode
== VOIDmode
)
4785 cmp_mode
= GET_MODE (op0
);
4786 if (cmp_mode
== VOIDmode
)
4787 cmp_mode
= GET_MODE (op1
);
4789 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4792 if (SCALAR_FLOAT_MODE_P (mode
))
4794 if (tem
== const0_rtx
)
4795 return CONST0_RTX (mode
);
4796 #ifdef FLOAT_STORE_FLAG_VALUE
4798 REAL_VALUE_TYPE val
;
4799 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4800 return const_double_from_real_value (val
, mode
);
4806 if (VECTOR_MODE_P (mode
))
4808 if (tem
== const0_rtx
)
4809 return CONST0_RTX (mode
);
4810 #ifdef VECTOR_STORE_FLAG_VALUE
4812 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4813 if (val
== NULL_RTX
)
4815 if (val
== const1_rtx
)
4816 return CONST1_RTX (mode
);
4818 return gen_const_vec_duplicate (mode
, val
);
4828 /* For the following tests, ensure const0_rtx is op1. */
4829 if (swap_commutative_operands_p (op0
, op1
)
4830 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4831 std::swap (op0
, op1
), code
= swap_condition (code
);
4833 /* If op0 is a compare, extract the comparison arguments from it. */
4834 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4835 return simplify_gen_relational (code
, mode
, VOIDmode
,
4836 XEXP (op0
, 0), XEXP (op0
, 1));
4838 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4842 trueop0
= avoid_constant_pool_reference (op0
);
4843 trueop1
= avoid_constant_pool_reference (op1
);
4844 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
4858 enum rtx_code op0code
= GET_CODE (op0
);
4860 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4862 /* If op0 is a comparison, extract the comparison arguments
4866 if (GET_MODE (op0
) == mode
)
4867 return simplify_rtx (op0
);
4869 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4870 XEXP (op0
, 0), XEXP (op0
, 1));
4872 else if (code
== EQ
)
4874 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
4875 if (new_code
!= UNKNOWN
)
4876 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4877 XEXP (op0
, 0), XEXP (op0
, 1));
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
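  /* E.g. (ltu:SI (plus:SI (reg:SI a) (const_int 4)) (const_int 4))
     becomes (geu:SI (reg:SI a) (const_int -4)); the overflow check of an
     addition is rewritten as a single unsigned comparison.  (Illustrative
     operands; SImode chosen arbitrarily.)  */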
4883 if ((code
== LTU
|| code
== GEU
)
4884 && GET_CODE (op0
) == PLUS
4885 && CONST_INT_P (XEXP (op0
, 1))
4886 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4887 || rtx_equal_p (op1
, XEXP (op0
, 1)))
4888 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4889 && XEXP (op0
, 1) != const0_rtx
)
4892 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4893 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4894 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4897 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4898 transformed into (LTU a -C). */
4899 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
4900 && CONST_INT_P (XEXP (op0
, 1))
4901 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
4902 && XEXP (op0
, 1) != const0_rtx
)
4905 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4906 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
4907 XEXP (op0
, 0), new_cmp
);
4910 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4911 if ((code
== LTU
|| code
== GEU
)
4912 && GET_CODE (op0
) == PLUS
4913 && rtx_equal_p (op1
, XEXP (op0
, 1))
4914 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4915 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4916 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4917 copy_rtx (XEXP (op0
, 0)));
4919 if (op1
== const0_rtx
)
4921 /* Canonicalize (GTU x 0) as (NE x 0). */
4923 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4924 /* Canonicalize (LEU x 0) as (EQ x 0). */
4926 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4928 else if (op1
== const1_rtx
)
4933 /* Canonicalize (GE x 1) as (GT x 0). */
4934 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4937 /* Canonicalize (GEU x 1) as (NE x 0). */
4938 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4941 /* Canonicalize (LT x 1) as (LE x 0). */
4942 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4945 /* Canonicalize (LTU x 1) as (EQ x 0). */
4946 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4952 else if (op1
== constm1_rtx
)
4954 /* Canonicalize (LE x -1) as (LT x 0). */
4956 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4957 /* Canonicalize (GT x -1) as (GE x 0). */
4959 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
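  /* E.g. (eq (plus:SI (reg:SI x) (const_int 3)) (const_int 10)) becomes
     (eq (reg:SI x) (const_int 7)).  (Illustrative constants.)  */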
4963 if ((code
== EQ
|| code
== NE
)
4964 && (op0code
== PLUS
|| op0code
== MINUS
)
4966 && CONSTANT_P (XEXP (op0
, 1))
4967 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4969 rtx x
= XEXP (op0
, 0);
4970 rtx c
= XEXP (op0
, 1);
4971 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4972 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
4974 /* Detect an infinite recursive condition, where we oscillate at this
4975 simplification case between:
4976 A + B == C <---> C - B == A,
4977 where A, B, and C are all constants with non-simplifiable expressions,
4978 usually SYMBOL_REFs. */
4979 if (GET_CODE (tem
) == invcode
4981 && rtx_equal_p (c
, XEXP (tem
, 1)))
4984 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
4987 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4988 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4989 scalar_int_mode int_mode
, int_cmp_mode
;
4991 && op1
== const0_rtx
4992 && is_int_mode (mode
, &int_mode
)
4993 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
4994 /* ??? Work-around BImode bugs in the ia64 backend. */
4995 && int_mode
!= BImode
4996 && int_cmp_mode
!= BImode
4997 && nonzero_bits (op0
, int_cmp_mode
) == 1
4998 && STORE_FLAG_VALUE
== 1)
4999 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
5000 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
5001 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
5003 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5004 if ((code
== EQ
|| code
== NE
)
5005 && op1
== const0_rtx
5007 return simplify_gen_relational (code
, mode
, cmp_mode
,
5008 XEXP (op0
, 0), XEXP (op0
, 1));
5010 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5011 if ((code
== EQ
|| code
== NE
)
5013 && rtx_equal_p (XEXP (op0
, 0), op1
)
5014 && !side_effects_p (XEXP (op0
, 0)))
5015 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
5018 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5019 if ((code
== EQ
|| code
== NE
)
5021 && rtx_equal_p (XEXP (op0
, 1), op1
)
5022 && !side_effects_p (XEXP (op0
, 1)))
5023 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5026 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5027 if ((code
== EQ
|| code
== NE
)
5029 && CONST_SCALAR_INT_P (op1
)
5030 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
5031 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5032 simplify_gen_binary (XOR
, cmp_mode
,
5033 XEXP (op0
, 1), op1
));
  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
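  /* E.g. (eq (and:SI (reg:SI x) (reg:SI y)) (reg:SI x)) becomes
     (eq (and:SI (not:SI (reg:SI y)) (reg:SI x)) (const_int 0)).
     (Illustrative registers.)  */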
5038 if ((code
== EQ
|| code
== NE
)
5040 && rtx_equal_p (XEXP (op0
, 0), op1
)
5041 && !side_effects_p (op1
)
5042 && op1
!= CONST0_RTX (cmp_mode
))
5044 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
5045 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
5047 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5048 CONST0_RTX (cmp_mode
));
5051 /* Likewise for (eq/ne (and x y) y). */
5052 if ((code
== EQ
|| code
== NE
)
5054 && rtx_equal_p (XEXP (op0
, 1), op1
)
5055 && !side_effects_p (op1
)
5056 && op1
!= CONST0_RTX (cmp_mode
))
5058 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0), cmp_mode
);
5059 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
5061 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
5062 CONST0_RTX (cmp_mode
));
5065 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5066 if ((code
== EQ
|| code
== NE
)
5067 && GET_CODE (op0
) == BSWAP
5068 && CONST_SCALAR_INT_P (op1
))
5069 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
5070 simplify_gen_unary (BSWAP
, cmp_mode
,
5073 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5074 if ((code
== EQ
|| code
== NE
)
5075 && GET_CODE (op0
) == BSWAP
5076 && GET_CODE (op1
) == BSWAP
)
5077 return simplify_gen_relational (code
, mode
, cmp_mode
,
5078 XEXP (op0
, 0), XEXP (op1
, 0));
5080 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
5086 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5087 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
5088 XEXP (op0
, 0), const0_rtx
);
5093 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5094 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
5095 XEXP (op0
, 0), const0_rtx
);
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */
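/* For example, if KNOWN_RESULT is CMP_LT | CMP_LTU, then requesting LT or
   LTU yields const_true_rtx, while EQ, GE, GT, GEU and GTU yield const0_rtx.
   (Illustrative combination.)  */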
5121 comparison_result (enum rtx_code code
, int known_results
)
5127 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
5130 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
5134 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
5137 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
5141 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
5144 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
5147 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
5149 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
5152 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
5154 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
5157 return const_true_rtx
;
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
5180 gcc_assert (mode
!= VOIDmode
5181 || (GET_MODE (op0
) == VOIDmode
5182 && GET_MODE (op1
) == VOIDmode
));
5184 /* If op0 is a compare, extract the comparison arguments from it. */
5185 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5187 op1
= XEXP (op0
, 1);
5188 op0
= XEXP (op0
, 0);
5190 if (GET_MODE (op0
) != VOIDmode
)
5191 mode
= GET_MODE (op0
);
5192 else if (GET_MODE (op1
) != VOIDmode
)
5193 mode
= GET_MODE (op1
);
5198 /* We can't simplify MODE_CC values since we don't know what the
5199 actual comparison is. */
5200 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
5203 /* Make sure the constant is second. */
5204 if (swap_commutative_operands_p (op0
, op1
))
5206 std::swap (op0
, op1
);
5207 code
= swap_condition (code
);
5210 trueop0
= avoid_constant_pool_reference (op0
);
5211 trueop1
= avoid_constant_pool_reference (op1
);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */
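  /* For instance, an EQ comparison of (plus:SI (reg:SI r) (const_int 4))
     with (plus:SI (reg:SI r) (const_int 5)): the difference folds to
     (const_int -1), and comparing that against zero yields const0_rtx.
     (Illustrative operands.)  */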
5223 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5224 && (code
== EQ
|| code
== NE
)
5225 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5226 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5227 && (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
)) != 0
5228 /* We cannot do this if tem is a nonzero address. */
5229 && ! nonzero_address_p (tem
))
5230 return simplify_const_relational_operation (signed_condition (code
),
5231 mode
, tem
, const0_rtx
);
5233 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5234 return const_true_rtx
;
5236 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
5239 /* For modes without NaNs, if the two operands are equal, we know the
5240 result except if they have side-effects. Even with NaNs we know
5241 the result of unordered comparisons and, if signaling NaNs are
5242 irrelevant, also the result of LT/GT/LTGT. */
5243 if ((! HONOR_NANS (trueop0
)
5244 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5245 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5246 && ! HONOR_SNANS (trueop0
)))
5247 && rtx_equal_p (trueop0
, trueop1
)
5248 && ! side_effects_p (trueop0
))
5249 return comparison_result (code
, CMP_EQ
);
5251 /* If the operands are floating-point constants, see if we can fold
5253 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5254 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5255 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5257 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5258 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5260 /* Comparisons are unordered iff at least one of the values is NaN. */
5261 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
5271 return const_true_rtx
;
5284 return comparison_result (code
,
5285 (real_equal (d0
, d1
) ? CMP_EQ
:
5286 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
5289 /* Otherwise, see if the operands are both integers. */
5290 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
5291 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
5293 /* It would be nice if we really had a mode here. However, the
5294 largest int representable on the target is as good as
5296 machine_mode cmode
= (mode
== VOIDmode
) ? MAX_MODE_INT
: mode
;
5297 rtx_mode_t ptrueop0
= rtx_mode_t (trueop0
, cmode
);
5298 rtx_mode_t ptrueop1
= rtx_mode_t (trueop1
, cmode
);
5300 if (wi::eq_p (ptrueop0
, ptrueop1
))
5301 return comparison_result (code
, CMP_EQ
);
5304 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
5305 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
5306 return comparison_result (code
, cr
);
5310 /* Optimize comparisons with upper and lower bounds. */
5311 scalar_int_mode int_mode
;
5312 if (CONST_INT_P (trueop1
)
5313 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5314 && HWI_COMPUTABLE_MODE_P (int_mode
)
5315 && !side_effects_p (trueop0
))
5318 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
5319 HOST_WIDE_INT val
= INTVAL (trueop1
);
5320 HOST_WIDE_INT mmin
, mmax
;
5330 /* Get a reduced range if the sign bit is zero. */
5331 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
5338 rtx mmin_rtx
, mmax_rtx
;
5339 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
5341 mmin
= INTVAL (mmin_rtx
);
5342 mmax
= INTVAL (mmax_rtx
);
5345 unsigned int sign_copies
5346 = num_sign_bit_copies (trueop0
, int_mode
);
5348 mmin
>>= (sign_copies
- 1);
5349 mmax
>>= (sign_copies
- 1);
5355 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5357 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5358 return const_true_rtx
;
5359 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5364 return const_true_rtx
;
5369 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5371 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5372 return const_true_rtx
;
5373 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5378 return const_true_rtx
;
5384 /* x == y is always false for y out of range. */
5385 if (val
< mmin
|| val
> mmax
)
5389 /* x > y is always false for y >= mmax, always true for y < mmin. */
5391 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5393 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5394 return const_true_rtx
;
5400 return const_true_rtx
;
5403 /* x < y is always false for y <= mmin, always true for y > mmax. */
5405 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5407 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5408 return const_true_rtx
;
5414 return const_true_rtx
;
5418 /* x != y is always true for y out of range. */
5419 if (val
< mmin
|| val
> mmax
)
5420 return const_true_rtx
;
5428 /* Optimize integer comparisons with zero. */
5429 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
5430 && trueop1
== const0_rtx
5431 && !side_effects_p (trueop0
))
5433 /* Some addresses are known to be nonzero. We don't know
5434 their sign, but equality comparisons are known. */
5435 if (nonzero_address_p (trueop0
))
5437 if (code
== EQ
|| code
== LEU
)
5439 if (code
== NE
|| code
== GTU
)
5440 return const_true_rtx
;
5443 /* See if the first operand is an IOR with a constant. If so, we
5444 may be able to determine the result of this comparison. */
5445 if (GET_CODE (op0
) == IOR
)
5447 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5448 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5450 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
5451 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5452 && (UINTVAL (inner_const
)
5463 return const_true_rtx
;
5467 return const_true_rtx
;
5481 /* Optimize comparison of ABS with zero. */
5482 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
5483 && (GET_CODE (trueop0
) == ABS
5484 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5485 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5490 /* Optimize abs(x) < 0.0. */
5491 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
5496 /* Optimize abs(x) >= 0.0. */
5497 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
5498 return const_true_rtx
;
5502 /* Optimize ! (abs(x) < 0.0). */
5503 return const_true_rtx
;
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the
   expression can be simplified to that or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */
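/* E.g. on a target whose CLZ_DEFINED_VALUE_AT_ZERO supplies 32 for SImode,
   the selection (eq x 0) ? (const_int 32) : (clz:SI x) collapses to just
   (clz:SI x).  (The defined-at-zero value is target-specific; 32 is only
   illustrative.)  */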
5521 simplify_cond_clz_ctz (rtx x
, rtx_code cmp_code
, rtx true_val
, rtx false_val
)
5523 if (cmp_code
!= EQ
&& cmp_code
!= NE
)
5526 /* Result on X == 0 and X !=0 respectively. */
5527 rtx on_zero
, on_nonzero
;
5531 on_nonzero
= false_val
;
5535 on_zero
= false_val
;
5536 on_nonzero
= true_val
;
5539 rtx_code op_code
= GET_CODE (on_nonzero
);
5540 if ((op_code
!= CLZ
&& op_code
!= CTZ
)
5541 || !rtx_equal_p (XEXP (on_nonzero
, 0), x
)
5542 || !CONST_INT_P (on_zero
))
5545 HOST_WIDE_INT op_val
;
5546 scalar_int_mode mode ATTRIBUTE_UNUSED
5547 = as_a
<scalar_int_mode
> (GET_MODE (XEXP (on_nonzero
, 0)));
5548 if (((op_code
== CLZ
&& CLZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
))
5549 || (op_code
== CTZ
&& CTZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
)))
5550 && op_val
== INTVAL (on_zero
))
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
5566 bool any_change
= false;
5568 scalar_int_mode int_mode
, int_op0_mode
;
5573 /* Simplify negations around the multiplication. */
5574 /* -a * -b + c => a * b + c. */
5575 if (GET_CODE (op0
) == NEG
)
5577 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5579 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5581 else if (GET_CODE (op1
) == NEG
)
5583 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5585 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5588 /* Canonicalize the two multiplication operands. */
5589 /* a * -b + c => -b * a + c. */
5590 if (swap_commutative_operands_p (op0
, op1
))
5591 std::swap (op0
, op1
), any_change
= true;
5594 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5599 if (CONST_INT_P (op0
)
5600 && CONST_INT_P (op1
)
5601 && CONST_INT_P (op2
)
5602 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5603 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
5604 && HWI_COMPUTABLE_MODE_P (int_mode
))
5606 /* Extracting a bit-field from a constant */
5607 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5608 HOST_WIDE_INT op1val
= INTVAL (op1
);
5609 HOST_WIDE_INT op2val
= INTVAL (op2
);
5610 if (!BITS_BIG_ENDIAN
)
5612 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
5613 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
5615 /* Not enough information to calculate the bit position. */
5618 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5620 /* First zero-extend. */
5621 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
5622 /* If desired, propagate sign bit. */
5623 if (code
== SIGN_EXTRACT
5624 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
5626 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
5629 return gen_int_mode (val
, int_mode
);
5634 if (CONST_INT_P (op0
))
5635 return op0
!= const0_rtx
? op1
: op2
;
5637 /* Convert c ? a : a into "a". */
5638 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5641 /* Convert a != b ? a : b into "a". */
5642 if (GET_CODE (op0
) == NE
5643 && ! side_effects_p (op0
)
5644 && ! HONOR_NANS (mode
)
5645 && ! HONOR_SIGNED_ZEROS (mode
)
5646 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5647 && rtx_equal_p (XEXP (op0
, 1), op2
))
5648 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5649 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5652 /* Convert a == b ? a : b into "b". */
5653 if (GET_CODE (op0
) == EQ
5654 && ! side_effects_p (op0
)
5655 && ! HONOR_NANS (mode
)
5656 && ! HONOR_SIGNED_ZEROS (mode
)
5657 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5658 && rtx_equal_p (XEXP (op0
, 1), op2
))
5659 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5660 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5663 /* Convert (!c) != {0,...,0} ? a : b into
5664 c != {0,...,0} ? b : a for vector modes. */
5665 if (VECTOR_MODE_P (GET_MODE (op1
))
5666 && GET_CODE (op0
) == NE
5667 && GET_CODE (XEXP (op0
, 0)) == NOT
5668 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
5670 rtx cv
= XEXP (op0
, 1);
5671 int nunits
= CONST_VECTOR_NUNITS (cv
);
5673 for (int i
= 0; i
< nunits
; ++i
)
5674 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
5681 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
5682 XEXP (XEXP (op0
, 0), 0),
5684 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
5689 /* Convert x == 0 ? N : clz (x) into clz (x) when
5690 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5691 Similarly for ctz (x). */
5692 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
5693 && XEXP (op0
, 1) == const0_rtx
)
5696 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
5702 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5704 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5705 ? GET_MODE (XEXP (op0
, 1))
5706 : GET_MODE (XEXP (op0
, 0)));
5709 /* Look for happy constants in op1 and op2. */
5710 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5712 HOST_WIDE_INT t
= INTVAL (op1
);
5713 HOST_WIDE_INT f
= INTVAL (op2
);
5715 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5716 code
= GET_CODE (op0
);
5717 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5720 tmp
= reversed_comparison_code (op0
, NULL
);
5728 return simplify_gen_relational (code
, mode
, cmp_mode
,
5729 XEXP (op0
, 0), XEXP (op0
, 1));
5732 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5733 cmp_mode
, XEXP (op0
, 0),
5736 /* See if any simplifications were possible. */
5739 if (CONST_INT_P (temp
))
5740 return temp
== const0_rtx
? op2
: op1
;
5742 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5748 gcc_assert (GET_MODE (op0
) == mode
);
5749 gcc_assert (GET_MODE (op1
) == mode
);
5750 gcc_assert (VECTOR_MODE_P (mode
));
5751 trueop2
= avoid_constant_pool_reference (op2
);
5752 if (CONST_INT_P (trueop2
))
5754 unsigned n_elts
= GET_MODE_NUNITS (mode
);
5755 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
5756 unsigned HOST_WIDE_INT mask
;
5757 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
5760 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
5762 if (!(sel
& mask
) && !side_effects_p (op0
))
5764 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
5767 rtx trueop0
= avoid_constant_pool_reference (op0
);
5768 rtx trueop1
= avoid_constant_pool_reference (op1
);
5769 if (GET_CODE (trueop0
) == CONST_VECTOR
5770 && GET_CODE (trueop1
) == CONST_VECTOR
)
5772 rtvec v
= rtvec_alloc (n_elts
);
5775 for (i
= 0; i
< n_elts
; i
++)
5776 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
5777 ? CONST_VECTOR_ELT (trueop0
, i
)
5778 : CONST_VECTOR_ELT (trueop1
, i
));
5779 return gen_rtx_CONST_VECTOR (mode
, v
);
	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
5784 if (GET_CODE (op0
) == VEC_MERGE
)
5786 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
5787 if (CONST_INT_P (tem
))
5789 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
5790 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
5791 return simplify_gen_ternary (code
, mode
, mode
,
5792 XEXP (op0
, 1), op1
, op2
);
5793 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
5794 return simplify_gen_ternary (code
, mode
, mode
,
5795 XEXP (op0
, 0), op1
, op2
);
5798 if (GET_CODE (op1
) == VEC_MERGE
)
5800 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
5801 if (CONST_INT_P (tem
))
5803 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
5804 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
5805 return simplify_gen_ternary (code
, mode
, mode
,
5806 op0
, XEXP (op1
, 1), op2
);
5807 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
5808 return simplify_gen_ternary (code
, mode
, mode
,
5809 op0
, XEXP (op1
, 0), op2
);
5813 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5815 if (GET_CODE (op0
) == VEC_DUPLICATE
5816 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
5817 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
5818 && mode_nunits
[GET_MODE (XEXP (op0
, 0))] == 1)
5820 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
5821 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
5823 if (XEXP (XEXP (op0
, 0), 0) == op1
5824 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
5828 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5830 with (vec_concat (X) (B)) if N == 1 or
5831 (vec_concat (A) (X)) if N == 2. */
5832 if (GET_CODE (op0
) == VEC_DUPLICATE
5833 && GET_CODE (op1
) == CONST_VECTOR
5834 && CONST_VECTOR_NUNITS (op1
) == 2
5835 && GET_MODE_NUNITS (GET_MODE (op0
)) == 2
5836 && IN_RANGE (sel
, 1, 2))
5838 rtx newop0
= XEXP (op0
, 0);
5839 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
5841 std::swap (newop0
, newop1
);
5842 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
5844 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5845 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5846 Only applies for vectors of two elements. */
5847 if (GET_CODE (op0
) == VEC_DUPLICATE
5848 && GET_CODE (op1
) == VEC_CONCAT
5849 && GET_MODE_NUNITS (GET_MODE (op0
)) == 2
5850 && GET_MODE_NUNITS (GET_MODE (op1
)) == 2
5851 && IN_RANGE (sel
, 1, 2))
5853 rtx newop0
= XEXP (op0
, 0);
5854 rtx newop1
= XEXP (op1
, 2 - sel
);
5855 rtx otherop
= XEXP (op1
, sel
- 1);
5857 std::swap (newop0
, newop1
);
5858 /* Don't want to throw away the other part of the vec_concat if
5859 it has side-effects. */
5860 if (!side_effects_p (otherop
))
5861 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
	  /* Replace:

	      (vec_merge:outer (vec_duplicate:outer x:inner)
			       (subreg:outer y:inner 0)
			       (const_int N))

	     with (vec_concat:outer x:inner y:inner) if N == 1,
	     or (vec_concat:outer y:inner x:inner) if N == 2.

	     We assume that degenerate cases (N == 0 or N == 3), which
	     represent taking all elements from either input, are handled
	     elsewhere.

	     Implicitly, this means we have a paradoxical subreg, but such
	     a check is cheap, so make it anyway.

	     Only applies for vectors of two elements.  */
5881 if ((GET_CODE (op0
) == VEC_DUPLICATE
5882 || GET_CODE (op1
) == VEC_DUPLICATE
)
5883 && GET_MODE (op0
) == GET_MODE (op1
)
5884 && GET_MODE_NUNITS (GET_MODE (op0
)) == 2
5885 && GET_MODE_NUNITS (GET_MODE (op1
)) == 2
5886 && IN_RANGE (sel
, 1, 2))
5888 rtx newop0
= op0
, newop1
= op1
;
5890 /* Canonicalize locally such that the VEC_DUPLICATE is always
5891 the first operand. */
5892 if (GET_CODE (newop1
) == VEC_DUPLICATE
)
5894 std::swap (newop0
, newop1
);
5895 /* If we swap the operand order, we also need to swap
5896 the selector mask. */
5897 sel
= sel
== 1 ? 2 : 1;
5900 if (GET_CODE (newop1
) == SUBREG
5901 && paradoxical_subreg_p (newop1
)
5902 && subreg_lowpart_p (newop1
)
5903 && GET_MODE (SUBREG_REG (newop1
))
5904 == GET_MODE (XEXP (newop0
, 0)))
5906 newop0
= XEXP (newop0
, 0);
5907 newop1
= SUBREG_REG (newop1
);
5909 std::swap (newop0
, newop1
);
5910 return simplify_gen_binary (VEC_CONCAT
, mode
,
5915 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
5917 with (vec_concat x y) or (vec_concat y x) depending on value
5919 if (GET_CODE (op0
) == VEC_DUPLICATE
5920 && GET_CODE (op1
) == VEC_DUPLICATE
5921 && GET_MODE_NUNITS (GET_MODE (op0
)) == 2
5922 && GET_MODE_NUNITS (GET_MODE (op1
)) == 2
5923 && IN_RANGE (sel
, 1, 2))
5925 rtx newop0
= XEXP (op0
, 0);
5926 rtx newop1
= XEXP (op1
, 0);
5928 std::swap (newop0
, newop1
);
5930 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
5934 if (rtx_equal_p (op0
, op1
)
5935 && !side_effects_p (op2
) && !side_effects_p (op1
))
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
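/* Sketch of the idea: e.g. for (subreg:SI (const_double:DF x) 0) the DFmode
   image of x is serialised into its target byte representation, the four
   bytes selected by BYTE are extracted, and the result is reassembled as an
   SImode constant.  Word and byte endianness are accounted for below.
   (Illustrative modes.)  */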
static rtx
simplify_immed_subreg (fixed_size_mode outermode, rtx op,
		       fixed_size_mode innermode, unsigned int byte)
{
5961 value_mask
= (1 << value_bit
) - 1
5963 unsigned char value
[MAX_BITSIZE_MODE_ANY_MODE
/ value_bit
];
5971 rtx result_s
= NULL
;
5972 rtvec result_v
= NULL
;
5973 enum mode_class outer_class
;
5974 scalar_mode outer_submode
;
5977 /* Some ports misuse CCmode. */
5978 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
5981 /* We have no way to represent a complex constant at the rtl level. */
5982 if (COMPLEX_MODE_P (outermode
))
5985 /* We support any size mode. */
5986 max_bitsize
= MAX (GET_MODE_BITSIZE (outermode
),
5987 GET_MODE_BITSIZE (innermode
));
5989 /* Unpack the value. */
5991 if (GET_CODE (op
) == CONST_VECTOR
)
5993 num_elem
= CONST_VECTOR_NUNITS (op
);
5994 elem_bitsize
= GET_MODE_UNIT_BITSIZE (innermode
);
5999 elem_bitsize
= max_bitsize
;
6001 /* If this asserts, it is too complicated; reducing value_bit may help. */
6002 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
6003 /* I don't know how to handle endianness of sub-units. */
6004 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
6006 for (elem
= 0; elem
< num_elem
; elem
++)
6009 rtx el
= (GET_CODE (op
) == CONST_VECTOR
6010 ? CONST_VECTOR_ELT (op
, elem
)
6013 /* Vectors are kept in target memory order. (This is probably
6016 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
6017 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
6019 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6020 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6021 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
6022 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
6023 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
6026 switch (GET_CODE (el
))
6030 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
6032 *vp
++ = INTVAL (el
) >> i
;
6033 /* CONST_INTs are always logically sign-extended. */
6034 for (; i
< elem_bitsize
; i
+= value_bit
)
6035 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
6038 case CONST_WIDE_INT
:
6040 rtx_mode_t val
= rtx_mode_t (el
, GET_MODE_INNER (innermode
));
6041 unsigned char extend
= wi::sign_mask (val
);
6042 int prec
= wi::get_precision (val
);
6044 for (i
= 0; i
< prec
&& i
< elem_bitsize
; i
+= value_bit
)
6045 *vp
++ = wi::extract_uhwi (val
, i
, value_bit
);
6046 for (; i
< elem_bitsize
; i
+= value_bit
)
6052 if (TARGET_SUPPORTS_WIDE_INT
== 0 && GET_MODE (el
) == VOIDmode
)
6054 unsigned char extend
= 0;
6055 /* If this triggers, someone should have generated a
6056 CONST_INT instead. */
6057 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
6059 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
6060 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
6061 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
6064 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
6068 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
6070 for (; i
< elem_bitsize
; i
+= value_bit
)
6075 /* This is big enough for anything on the platform. */
6076 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
6077 scalar_float_mode el_mode
;
6079 el_mode
= as_a
<scalar_float_mode
> (GET_MODE (el
));
6080 int bitsize
= GET_MODE_BITSIZE (el_mode
);
6082 gcc_assert (bitsize
<= elem_bitsize
);
6083 gcc_assert (bitsize
% value_bit
== 0);
6085 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
6088 /* real_to_target produces its result in words affected by
6089 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6090 and use WORDS_BIG_ENDIAN instead; see the documentation
6091 of SUBREG in rtl.texi. */
6092 for (i
= 0; i
< bitsize
; i
+= value_bit
)
6095 if (WORDS_BIG_ENDIAN
)
6096 ibase
= bitsize
- 1 - i
;
6099 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
6102 /* It shouldn't matter what's done here, so fill it with
6104 for (; i
< elem_bitsize
; i
+= value_bit
)
6110 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
6112 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
6113 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
6117 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
6118 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
6119 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
6121 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
6122 >> (i
- HOST_BITS_PER_WIDE_INT
);
6123 for (; i
< elem_bitsize
; i
+= value_bit
)
6133 /* Now, pick the right byte to start with. */
6134 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6135 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6136 will already have offset 0. */
6137 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
6139 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
6141 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6142 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6143 byte
= (subword_byte
% UNITS_PER_WORD
6144 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
	      / HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
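/* For example (illustrative only): on a little-endian target,
   simplify_subreg (QImode, GEN_INT (0x1234), SImode, 0) returns
   (const_int 0x34), the low byte of the SImode value.  */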
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, poly_uint64 byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if (!multiple_p (byte, GET_MODE_SIZE (outermode)))
    return NULL_RTX;

  if (maybe_ge (byte, GET_MODE_SIZE (innermode)))
    return NULL_RTX;

  if (outermode == innermode && known_eq (byte, 0U))
    return op;

  if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
    {
      rtx elt;

      if (VECTOR_MODE_P (outermode)
	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return gen_vec_duplicate (outermode, elt);

      if (outermode == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return elt;
    }

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || CONST_FIXED_P (op)
      || GET_CODE (op) == CONST_VECTOR)
    {
      /* simplify_immed_subreg deconstructs OP into bytes and constructs
	 the result from bytes, so it only works if the sizes of the modes
	 and the value of the offset are known at compile time.  Cases
	 that apply to general modes and offsets should be handled here
	 before calling simplify_immed_subreg.  */
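      /* (Illustrative note: with variable-length modes, e.g. the
	 variable-width vector modes some targets provide, the mode sizes
	 and BYTE need not be compile-time constants, so this path is
	 simply skipped and no simplification is attempted here.)  */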
      fixed_size_mode fs_outermode, fs_innermode;
      unsigned HOST_WIDE_INT cbyte;
      if (is_a <fixed_size_mode> (outermode, &fs_outermode)
	  && is_a <fixed_size_mode> (innermode, &fs_innermode)
	  && byte.is_constant (&cbyte))
	return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte);

      return NULL_RTX;
    }

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
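  /* E.g. (illustrative, little-endian): (subreg:HI (subreg:SI (reg:DI x) 0) 0)
     can be rewritten directly as (subreg:HI (reg:DI x) 0), while the
     paradoxical (subreg:DI (subreg:SI (reg:DI x) 0) 0) collapses back to
     (reg:DI x) itself.  */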
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      rtx newx;

      if (outermode == innermostmode
	  && known_eq (byte, 0U)
	  && known_eq (SUBREG_BYTE (op), 0))
	return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
	 to the inner value of OP.  */
      poly_int64 mem_offset = subreg_memory_offset (outermode,
						    innermode, byte);
      poly_int64 op_mem_offset = subreg_memory_offset (op);
      poly_int64 final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (maybe_lt (final_offset, 0))
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (!multiple_p (final_offset, GET_MODE_SIZE (outermode))
	      || maybe_ge (final_offset, GET_MODE_SIZE (innermostmode)))
	    return NULL_RTX;
	}
      else
	{
	  poly_int64 required_offset = subreg_memory_offset (outermode,
							     innermostmode, 0);
	  if (maybe_ne (final_offset, required_offset))
	    return NULL_RTX;
	  /* Paradoxical subregs always have byte offset 0.  */
	  final_offset = 0;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
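  /* For instance (illustrative, assuming word-sized hard registers and a
     little-endian target): (subreg:SI (reg:DI 10) 4) can become (reg:SI 11)
     when hard register 11 can hold SImode; otherwise the SUBREG is kept.  */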
  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
				      subreg_memory_offset (outermode,
							    innermode, byte));

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis,
	     which cannot grok partial registers anyway.  */
	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */
  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      unsigned int part_size;
      poly_uint64 final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      part_size = GET_MODE_SIZE (part_mode);
      if (known_lt (byte, part_size))
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else if (known_ge (byte, part_size))
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}
      else
	return NULL_RTX;

      if (maybe_gt (final_offset + GET_MODE_SIZE (outermode), part_size))
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
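  /* E.g. (illustrative, little-endian): in
     (subreg:SI (zero_extend:DI (reg:SI x)) 4) the SUBREG reads only bits
     that the ZERO_EXTEND guarantees to be zero, so the whole expression
     folds to (const_int 0).  */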
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
	return CONST0_RTX (outermode);
    }

  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
    {
      /* Handle polynomial integers.  The upper bits of a paradoxical
	 subreg are undefined, so this is safe regardless of whether
	 we're truncating or extending.  */
      if (CONST_POLY_INT_P (op))
	{
	  poly_wide_int val
	    = poly_wide_int::from (const_poly_int_value (op),
				   GET_MODE_PRECISION (int_outermode),
				   SIGNED);
	  return immed_wide_int_const (val, int_outermode);
	}

      if (GET_MODE_PRECISION (int_outermode)
	  < GET_MODE_PRECISION (int_innermode))
	{
	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
	  if (tem)
	    return tem;
	}
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, poly_uint64 byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
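/* (Illustrative usage: simplify_gen_subreg (QImode, x, SImode, 0) returns
   whatever simplify_subreg can make of it -- e.g. a CONST_INT when X is a
   constant -- and otherwise falls back to building (subreg:QI x 0) if that
   SUBREG would be valid.)  */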
/* Generate a SUBREG that extracts the least significant part of EXPR
   (which has mode INNER_MODE) in mode OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
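/* (Illustrative example: lowpart_subreg (SImode, x, DImode) is equivalent to
   simplify_gen_subreg (SImode, x, DImode, 0) on little-endian targets and to
   a byte offset of 4 on big-endian targets, since subreg_lowpart_offset
   selects the offset of the least significant part.)  */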
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   that do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass-dependent state to be provided to these
	   routines and add simplifications based on the pass-dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}
#if CHECKING_P

namespace selftest {

/* Make a unique pseudo REG of mode MODE for use by selftests.  */

static rtx
make_test_reg (machine_mode mode)
{
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}

/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */
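/* (Illustrative: for MODE == V4SImode, SCALAR_REG would be an SImode pseudo
   and the tests below build rtxes such as
   (vec_duplicate:V4SI (reg:SI ...)).)  */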
static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  unsigned int nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NOT, mode,
					       duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NEG, mode,
					       duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (PLUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (MINUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
			 simplify_binary_operation (MINUS, mode, duplicate,
						    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
		     simplify_binary_operation (VEC_SELECT, inner_mode,
						duplicate, zero_par));

  /* And again with the final element.  */
  rtx last_index = gen_int_mode (GET_MODE_NUNITS (mode) - 1, word_mode);
  rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
  ASSERT_RTX_PTR_EQ (scalar_reg,
		     simplify_binary_operation (VEC_SELECT, inner_mode,
						duplicate, last_par));

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
		 simplify_gen_subreg (inner_mode, duplicate,
				      mode, offset));

  machine_mode narrower_mode;
  if (nunits > 2
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_binary_operation (VEC_SELECT, narrower_mode,
						duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_gen_subreg (narrower_mode, duplicate,
					  mode, offset));
    }
}
/* Test vector simplifications involving VEC_SERIES in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_series (machine_mode mode, rtx scalar_reg)
{
  /* Test unary cases with VEC_SERIES arguments.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
  rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
  rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
  rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
  rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
  rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
  rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
					 neg_scalar_reg);
  ASSERT_RTX_EQ (series_0_r,
		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
  ASSERT_RTX_EQ (series_r_r,
		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));

  /* Test that a VEC_SERIES with a zero step is simplified away.  */
  ASSERT_RTX_EQ (duplicate,
		 simplify_binary_operation (VEC_SERIES, mode,
					    scalar_reg, const0_rtx));

  /* Test PLUS and MINUS with VEC_SERIES.  */
  rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
  rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
  rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
  ASSERT_RTX_EQ (series_r_r,
		 simplify_binary_operation (PLUS, mode, series_0_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_r,
		 simplify_binary_operation (MINUS, mode, series_r_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
					    constm1_rtx));
}
/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
	{
	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
	  test_vector_ops_duplicate (mode, scalar_reg);
	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	      && GET_MODE_NUNITS (mode) > 2)
	    test_vector_ops_series (mode, scalar_reg);
	}
    }
}
template<unsigned int N>
struct simplify_const_poly_int_tests
{
  static void run ();
};

template<>
struct simplify_const_poly_int_tests<1>
{
  static void run () {}
};

/* Test various CONST_POLY_INT properties.  */

template<unsigned int N>
void
simplify_const_poly_int_tests<N>::run ()
{
  rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
  rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
  rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
  rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
  rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
  rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
  rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
  rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
  rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
  rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
  rtx two = GEN_INT (2);
  rtx six = GEN_INT (6);
  poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);

  /* These tests only try limited operation combinations.  Fuller arithmetic
     testing is done directly on poly_ints.  */
  ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
  ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
  ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
  ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
  ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
  ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
  ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
  ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
  ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
  ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
}

/* Run all of the selftests within this file.  */

void
simplify_rtx_c_tests ()
{
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}

} // namespace selftest

#endif /* CHECKING_P */