/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "diagnostic-core.h"
#include "selftest-rtl.h"
#include "rtx-vector-builder.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
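
/* Illustrative sketch (not part of the original source): with a 64-bit
   HOST_WIDE_INT, HWI_SIGN_EXTEND supplies the missing high half when
   widening a single value to a (low, high) pair:

     unsigned HOST_WIDE_INT low = -2;              -> 0xfffffffffffffffe
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);   -> HOST_WIDE_INT_M1

   so the pair (low, high) reads as the double-width value -2.  */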

static bool plus_minus_operand_p (const_rtx);
/* Negate I, which satisfies poly_int_rtx_p.  MODE is the mode of I.  */

static rtx
neg_poly_int_rtx (machine_mode mode, const_rtx i)
{
  return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
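
/* Worked example (illustrative): for SImode, the only value accepted
   by mode_signbit_p is 0x80000000, i.e. (const_int -2147483648) --
   the constant with just bit 31 set.  */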

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
				       rtx op0, rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
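
/* Usage sketch (illustrative): simplify_gen_binary (PLUS, SImode, x,
   const0_rtx) folds to X for integer modes instead of building
   (plus:SI x (const_int 0)), and (plus:SI (const_int 4) x) is
   canonicalized so the constant comes second.  */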

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
	return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
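
/* Illustrative example: on targets that place floating-point literals
   in the constant pool, a reference like (mem/u (symbol_ref .LC0))
   whose pool entry holds 1.5 is rewritten here to the CONST_DOUBLE
   for 1.5, letting later folding see the value itself.  */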

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      poly_int64 offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep);
	    if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
		|| !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
		|| (toffset && !poly_int_tree_p (toffset, &toffset_val)))
	      decl = NULL;
	    else
	      offset += bytepos + toffset_val;
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && VAR_P (decl)
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
	      poly_int64 n_offset, o_offset;

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      n = strip_offset (n, &n_offset);
	      o = strip_offset (o, &o_offset);
	      if (!(known_eq (o_offset, n_offset + offset)
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && known_eq (offset, 0))
	    x = newx;
	}
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
				      machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
					machine_mode op0_mode,
					rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
					 op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
					   machine_mode cmp_mode,
					   rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
					    op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X; if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
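
/* Usage sketch (illustrative): with X = (plus:SI (reg:SI n) (const_int 4)),
   simplify_replace_rtx (x, reg_n, const0_rtx) substitutes the register
   and folds the result to (const_int 4) rather than returning
   (plus:SI (const_int 0) (const_int 4)).  */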

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (reg:DI Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

rtx
simplify_context::simplify_truncation (machine_mode mode, rtx op,
				       machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
	  /* If doing this transform works for an X with all bits set,
	     it works for any X.  */
	  && ((GET_MODE_MASK (mode) >> shift) & mask)
	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
	{
	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
	  return simplify_gen_binary (AND, mode, op0, mask_op);
	}
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    {
	      pos -= op_precision - precision;
	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					   XEXP (op, 1), GEN_INT (pos));
	    }
	}
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					 XEXP (op, 1), XEXP (op, 2));
	}
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
	return simplify_gen_unary (TRUNCATE, int_mode, inner,
				   GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
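
/* Worked example (illustrative): by the distribution rule above,
   (truncate:QI (plus:SI (reg:SI a) (reg:SI b))) becomes
   (plus:QI (truncate:QI (reg:SI a)) (truncate:QI (reg:SI b))),
   which is safe because the low 8 bits of a sum depend only on the
   low 8 bits of the addends.  */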

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
					    rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
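
/* Worked example (illustrative): SFmode has a 24-bit significand, so
   (float:SF (reg:SI x)) is exact when X is known to fit in 24
   significant bits -- e.g. after (and:SI x (const_int 0xffff)) --
   while a full 32-bit value may need rounding.  */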

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
rtx
simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
					      rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
	 and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return simplify_gen_relational (GE, int_mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    std::swap (in1, in2);

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;
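
      /* Illustrative example of the De Morgan rewrite above:
	 (not:SI (ior:SI (reg:SI a) (reg:SI b))) becomes
	 (and:SI (not:SI (reg:SI a)) (not:SI (reg:SI b))), which matches
	 targets that provide and-with-complement patterns.  */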

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
	 If comparison is not reversible use
	 x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
	{
	  rtx cond = XEXP (op, 0);
	  rtx true_rtx = XEXP (op, 1);
	  rtx false_rtx = XEXP (op, 2);

	  if ((GET_CODE (true_rtx) == NEG
	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	       || (GET_CODE (false_rtx) == NEG
		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
	    {
	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
		temp = reversed_comparison (cond, mode);
	      else
		{
		  temp = cond;
		  std::swap (true_rtx, false_rtx);
		}
	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
					   mode, temp, true_rtx, false_rtx);
	    }
	}

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
	{
	  int_mode = as_a <scalar_int_mode> (mode);
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  gen_int_shift_amount (inner,
								isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  gen_int_shift_amount (inner,
								isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	}

      if (vec_series_p (op, &base, &step))
	{
	  /* Only create a new series if we can simplify both parts.  In other
	     cases this isn't really a simplification, and it's not necessarily
	     a win to replace a vector operation with a scalar operation.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
	  if (base)
	    {
	      step = simplify_unary_operation (NEG, inner_mode,
					       step, inner_mode);
	      if (step)
		return gen_vec_series (mode, base, step);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (known_eq (GET_MODE_NUNITS (mode), 1)
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_UNIT_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	  && (flag_unsafe_math_optimizations
	      || exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.
	 */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	      && exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && (num_sign_bit_copies (op, int_mode)
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_NEG (int_mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	case PARITY:
	  /* (parity (parity x)) -> parity (x).  */
	  return op;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}
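
      /* Illustrative example: (sign_extend:DI (mult:SI
	 (sign_extend:SI (reg:HI a)) (sign_extend:SI (reg:HI b))))
	 needs at most 16 + 16 = 32 bits, so it is rewritten as the
	 wider widening multiply (mult:DI (sign_extend:DI (reg:HI a))
	 (sign_extend:DI (reg:HI b))).  */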

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  gcc_assert (GET_MODE_PRECISION (int_mode)
		      > GET_MODE_PRECISION (op_mode));
	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   int_mode, inner, tmode);
	    }
	}

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
	 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (op, 1) != const0_rtx)
	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

      /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
	 I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
	 (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
	 (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
	 O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
	 wider than O.  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
	{
	  scalar_int_mode m_mode, n_mode, o_mode;
	  rtx old_shift = XEXP (op, 0);
	  if (is_a <scalar_int_mode> (mode, &m_mode)
	      && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
	      && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
	      && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
		 == INTVAL (XEXP (old_shift, 1)))
	    {
	      rtx new_shift = simplify_gen_binary (ASHIFTRT,
						   GET_MODE (old_shift),
						   XEXP (old_shift, 0),
						   XEXP (old_shift, 1));
	      if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
		return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
					   GET_MODE (new_shift));
	      if (mode != GET_MODE (new_shift))
		return simplify_gen_unary (TRUNCATE, mode, new_shift,
					   GET_MODE (new_shift));
	      return new_shift;
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, int_mode,
					   inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
				     op0_mode);
	}

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    default:
      break;
    }

  if (VECTOR_MODE_P (mode)
      && vec_duplicate_p (op, &elt)
      && code != VEC_DUPLICATE)
    {
      /* Try applying the operator to ELT and see if that simplifies.
	 We can duplicate the result if so.

	 The reason we don't use simplify_gen_unary is that it isn't
	 necessarily a win to convert things like:

	   (neg:V (vec_duplicate:V (reg:S R)))

	 to:

	   (vec_duplicate:V (neg:S (reg:S R)))

	 The first might be done entirely in vector registers while the
	 second might need a move between register files.  */
      temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
				       elt, GET_MODE_INNER (GET_MODE (op)));
      if (temp)
	return gen_vec_duplicate (mode, temp);
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
				rtx op, machine_mode op_mode)
{
  scalar_int_mode result_mode;

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
	return gen_const_vec_duplicate (mode, op);
      if (GET_CODE (op) == CONST_VECTOR
	  && (CONST_VECTOR_DUPLICATE_P (op)
	      || CONST_VECTOR_NUNITS (op).is_constant ()))
	{
	  unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
				    ? CONST_VECTOR_NPATTERNS (op)
				    : CONST_VECTOR_NUNITS (op).to_constant ());
	  gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
	  rtx_vector_builder builder (mode, npatterns, 1);
	  for (unsigned i = 0; i < npatterns; i++)
	    builder.quick_push (CONST_VECTOR_ELT (op, i));
	  return builder.build ();
	}
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (op) == CONST_VECTOR
      && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
    {
      gcc_assert (GET_MODE (op) == op_mode);

      rtx_vector_builder builder;
      if (!builder.new_unary_operation (mode, op, false))
	return 0;

      unsigned int count = builder.encoded_nelts ();
      for (unsigned int i = 0; i < count; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (op_mode));
	  if (!x || !valid_for_const_vector_p (mode, x))
	    return 0;
	  builder.quick_push (x);
	}
      return builder.build ();
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
	return 0;

      wide_int result;
      scalar_int_mode imode = (op_mode == VOIDmode
			       ? result_mode
			       : as_a <scalar_int_mode> (op_mode));
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, and so if you added this to the test
	 above, the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), result_mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), result_mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    return NULL_RTX;
	  result = wi::shwi (int_value, result_mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), result_mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), result_mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	case SQRT:
	default:
	  return 0;
	}

      return immed_wide_int_const (result, result_mode);
    }
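  /* For example, (popcount:SI (const_int 0xff)) folds to (const_int 8),
     and (bswap:SI (const_int 0x12345678)) to (const_int 0x78563412).  */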
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);

      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      if (width > MAX_BITSIZE_MODE_ANY_INT)
	return 0;

      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the ABI of real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }
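  /* The saturation above means that, for example, (fix:SI) of a DFmode
     1.0e10 folds to (const_int 2147483647), and (unsigned_fix:SI) of any
     negative value folds to (const_int 0).  */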
  /* Handle polynomial integers.  */
  else if (CONST_POLY_INT_P (op))
    {
      poly_wide_int result;
      switch (code)
	{
	case NEG:
	  result = -const_poly_int_value (op);
	  break;

	case NOT:
	  result = ~const_poly_int_value (op);
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

rtx
simplify_context::simplify_byte_swapping_operation (rtx_code code,
						    machine_mode mode,
						    rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
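/* As an example of the first transformation above,
   (and:SI (bswap:SI x) (const_int 0xff)) becomes
   (bswap:SI (and:SI x (const_int 0xff000000))), exposing the swapped
   constant to further simplification.  */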
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

rtx
simplify_context::simplify_associative_operation (rtx_code code,
						  machine_mode mode,
						  rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
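/* For example, (plus (plus x (const_int 5)) y) is canonicalized here as
   (plus (plus x y) (const_int 5)), keeping the constant outermost.  */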
/* Return a mask describing the COMPARISON.  */

static int
comparison_to_mask (enum rtx_code comparison)
{
  switch (comparison)
    {
    case LT: return 8;
    case GT: return 4;
    case EQ: return 2;
    case UNORDERED: return 1;

    case LTGT: return 12;
    case LE: return 10;
    case GE: return 6;
    case UNLT: return 9;
    case UNGT: return 5;
    case UNEQ: return 3;

    case ORDERED: return 14;
    case NE: return 13;
    case UNLE: return 11;
    case UNGE: return 7;

    default: gcc_unreachable ();
    }
}

/* Return a comparison corresponding to the MASK.  */

static enum rtx_code
mask_to_comparison (int mask)
{
  switch (mask)
    {
    case 8: return LT;
    case 4: return GT;
    case 2: return EQ;
    case 1: return UNORDERED;

    case 12: return LTGT;
    case 10: return LE;
    case 6: return GE;
    case 9: return UNLT;
    case 5: return UNGT;
    case 3: return UNEQ;

    case 14: return ORDERED;
    case 13: return NE;
    case 11: return UNLE;
    case 7: return UNGE;

    default: gcc_unreachable ();
    }
}
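/* The mask has one bit each for LT, GT, EQ and UNORDERED, so the union of
   two comparisons is the bitwise IOR of their masks: e.g. LT (8) combined
   with EQ (2) gives 10, which maps back to LE.  */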
/* Return true if CODE is valid for comparisons of mode MODE, false
   otherwise.

   It is always safe to return false, even if the code was valid for the
   given mode, as that will merely suppress optimizations.  */

static bool
comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
{
  switch (code)
    {
      /* These are valid for integral, floating and vector modes.  */
      case NE:
      case EQ:
      case GE:
      case GT:
      case LE:
      case LT:
	return (INTEGRAL_MODE_P (mode)
		|| FLOAT_MODE_P (mode)
		|| VECTOR_MODE_P (mode));

      /* These are valid for floating point modes.  */
      case LTGT:
      case UNORDERED:
      case ORDERED:
      case UNEQ:
      case UNGE:
      case UNGT:
      case UNLE:
      case UNLT:
	return FLOAT_MODE_P (mode);

      /* These are filtered out in simplify_logical_operation, but
	 we check for them too as a matter of safety.  They are valid
	 for integral and vector modes.  */
      case GEU:
      case GTU:
      case LEU:
      case LTU:
	return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);

      default:
	gcc_unreachable ();
    }
}
/* Simplify a logical operation CODE with result mode MODE, operating on
   OP0 and OP1, both of which should be relational operations.  Return 0
   if no such simplification is possible.  */

rtx
simplify_context::simplify_logical_relational_operation (rtx_code code,
							 machine_mode mode,
							 rtx op0, rtx op1)
{
  /* We only handle IOR of two relational operations.  */
  if (code != IOR)
    return 0;

  if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
    return 0;

  if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	&& rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
    return 0;

  enum rtx_code code0 = GET_CODE (op0);
  enum rtx_code code1 = GET_CODE (op1);

  /* We don't handle unsigned comparisons currently.  */
  if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
    return 0;
  if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
    return 0;

  int mask0 = comparison_to_mask (code0);
  int mask1 = comparison_to_mask (code1);

  int mask = mask0 | mask1;

  if (mask == 15)
    return const_true_rtx;

  code = mask_to_comparison (mask);

  /* Many comparison codes are only valid for certain mode classes.  */
  if (!comparison_code_valid_for_mode (code, mode))
    return 0;

  op0 = XEXP (op1, 0);
  op1 = XEXP (op1, 1);

  return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
}
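/* For example, (ior (lt x y) (eq x y)) over identical operands combines
   to (le x y), provided LE is valid for the result mode.  */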
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
					     rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
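/* Note that for commutative codes the constant ends up second, so a caller
   passing (PLUS, mode, (const_int 4), x) has it handled exactly as
   (plus x (const_int 4)) would be.  */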
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

rtx
simplify_context::simplify_binary_operation_series (rtx_code code,
						    machine_mode mode,
						    rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
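/* For example, adding (vec_duplicate (const_int 7)) to
   (vec_series (const_int 0) (const_int 1)) yields
   (vec_series (const_int 7) (const_int 1)).  */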
/* Subroutine of simplify_binary_operation_1.  Un-distribute a binary
   operation CODE with result mode MODE, operating on OP0 and OP1.
   E.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
   Returns NULL_RTX if no simplification is possible.  */

rtx
simplify_context::simplify_distributive_operation (rtx_code code,
						   machine_mode mode,
						   rtx op0, rtx op1)
{
  enum rtx_code op = GET_CODE (op0);
  gcc_assert (GET_CODE (op1) == op);

  if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
      && ! side_effects_p (XEXP (op0, 1)))
    return simplify_gen_binary (op, mode,
				simplify_gen_binary (code, mode,
						     XEXP (op0, 0),
						     XEXP (op1, 0)),
				XEXP (op0, 1));

  if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
    {
      if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && ! side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (op, mode,
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 1),
							 XEXP (op1, 1)),
				    XEXP (op0, 0));
      if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
	  && ! side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (op, mode,
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 1),
							 XEXP (op1, 0)),
				    XEXP (op0, 0));
      if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return simplify_gen_binary (op, mode,
				    simplify_gen_binary (code, mode,
							 XEXP (op0, 0),
							 XEXP (op1, 1)),
				    XEXP (op0, 1));
    }

  return NULL_RTX;
}
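/* For example, (ior (and a m) (and b m)) becomes (and (ior a b) m),
   saving one AND when M has no side effects.  */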
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

rtx
simplify_context::simplify_binary_operation_1 (rtx_code code,
					       machine_mode mode,
					       rtx op0, rtx op1,
					       rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;
  poly_int64 offset;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, offset);
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && poly_int_rtx_p (op0, &offset))
	return plus_constant (mode, op1, offset);
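      /* For example, (plus:SI (symbol_ref:SI "x") (const_int 12)) becomes
	 (const:SI (plus:SI (symbol_ref:SI "x") (const_int 12))), which the
	 assembler can resolve.  */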
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);

	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
	      return (set_src_cost (tem, int_mode, speed)
		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
	    }
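	  /* For example, (plus (mult x (const_int 3)) x) has lhs == rhs == x
	     with coefficients 3 and 1, and so becomes (mult x (const_int 4))
	     whenever that is no more expensive than the original.  */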
	  /* Optimize (X - 1) * Y + Y to X * Y.  */
	  lhs = op0;
	  rhs = op1;
	  if (GET_CODE (op0) == MULT)
	    {
	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
		    && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
		   || (GET_CODE (XEXP (op0, 0)) == MINUS
		       && XEXP (XEXP (op0, 0), 1) == const1_rtx))
		  && rtx_equal_p (XEXP (op0, 1), op1))
		lhs = XEXP (XEXP (op0, 0), 0);
	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
			 && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
			|| (GET_CODE (XEXP (op0, 1)) == MINUS
			    && XEXP (XEXP (op0, 1), 1) == const1_rtx))
		       && rtx_equal_p (XEXP (op0, 0), op1))
		lhs = XEXP (XEXP (op0, 1), 0);
	    }
	  else if (GET_CODE (op1) == MULT)
	    {
	      if (((GET_CODE (XEXP (op1, 0)) == PLUS
		    && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
		   || (GET_CODE (XEXP (op1, 0)) == MINUS
		       && XEXP (XEXP (op1, 0), 1) == const1_rtx))
		  && rtx_equal_p (XEXP (op1, 1), op0))
		rhs = XEXP (XEXP (op1, 0), 0);
	      else if (((GET_CODE (XEXP (op1, 1)) == PLUS
			 && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
			|| (GET_CODE (XEXP (op1, 1)) == MINUS
			    && XEXP (XEXP (op1, 1), 1) == const1_rtx))
		       && rtx_equal_p (XEXP (op1, 0), op0))
		rhs = XEXP (XEXP (op1, 1), 0);
	    }
	  if (lhs != op0 || rhs != op1)
	    return simplify_gen_binary (MULT, int_mode, lhs, rhs);
	}
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and
	 STORE_FLAG_VALUE is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  tem = simplify_binary_operation_series (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
	    return xop00;

	  if (REG_P (xop00) && REG_P (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE (xop00) == mode
	      && GET_MODE (xop10) == mode
	      && GET_MODE_CLASS (mode) == MODE_CC)
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
	 constants, in which case not retaining additions and
	 subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
	  && !contains_symbolic_reference_p (op1))
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signalling NaNs,
	 or has signed zeros and supports rounding towards -infinity.
	 In such a case, 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && !HONOR_SNANS (mode)
	  && trueop1 == CONST0_RTX (mode))
	return op0;
2766 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2768 rtx lhs
= op0
, rhs
= op1
;
2770 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2771 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2773 if (GET_CODE (lhs
) == NEG
)
2775 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2776 lhs
= XEXP (lhs
, 0);
2778 else if (GET_CODE (lhs
) == MULT
2779 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2781 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2782 lhs
= XEXP (lhs
, 0);
2784 else if (GET_CODE (lhs
) == ASHIFT
2785 && CONST_INT_P (XEXP (lhs
, 1))
2786 && INTVAL (XEXP (lhs
, 1)) >= 0
2787 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2789 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2790 GET_MODE_PRECISION (int_mode
));
2791 lhs
= XEXP (lhs
, 0);
2794 if (GET_CODE (rhs
) == NEG
)
2796 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2797 rhs
= XEXP (rhs
, 0);
2799 else if (GET_CODE (rhs
) == MULT
2800 && CONST_INT_P (XEXP (rhs
, 1)))
2802 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2803 rhs
= XEXP (rhs
, 0);
2805 else if (GET_CODE (rhs
) == ASHIFT
2806 && CONST_INT_P (XEXP (rhs
, 1))
2807 && INTVAL (XEXP (rhs
, 1)) >= 0
2808 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2810 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2811 GET_MODE_PRECISION (int_mode
));
2812 negcoeff1
= -negcoeff1
;
2813 rhs
= XEXP (rhs
, 0);
2816 if (rtx_equal_p (lhs
, rhs
))
2818 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2820 bool speed
= optimize_function_for_speed_p (cfun
);
2822 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2824 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2825 return (set_src_cost (tem
, int_mode
, speed
)
2826 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
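	  /* For example, (minus x (mult x (const_int 3))) has
	     lhs == rhs == x with coefficients 1 and -3, and so becomes
	     (mult x (const_int -2)) when that is cheaper.  */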
	  /* Optimize (X + 1) * Y - Y to X * Y.  */
	  lhs = op0;
	  if (GET_CODE (op0) == MULT)
	    {
	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
		    && XEXP (XEXP (op0, 0), 1) == const1_rtx)
		   || (GET_CODE (XEXP (op0, 0)) == MINUS
		       && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
		  && rtx_equal_p (XEXP (op0, 1), op1))
		lhs = XEXP (XEXP (op0, 0), 0);
	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
			 && XEXP (XEXP (op0, 1), 1) == const1_rtx)
			|| (GET_CODE (XEXP (op0, 1)) == MINUS
			    && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
		       && rtx_equal_p (XEXP (op0, 0), op1))
		lhs = XEXP (XEXP (op0, 1), 0);
	    }
	  if (lhs != op0)
	    return simplify_gen_binary (MULT, int_mode, lhs, op1);
	}
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && poly_int_rtx_p (op1, &offset))
	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));

      /* Don't let a relocatable value get a negative coeff.  */
      if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_poly_int_rtx (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
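      /* To see (x - (x & y)) -> (x & ~y) on bits: with x = 0b1100 and
	 y = 0b1010, x & y = 0b1000 and x - 0b1000 = 0b0100, which is
	 exactly x & ~y.  */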
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Handle vector series.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  tem = simplify_binary_operation_series (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0,
					gen_int_shift_amount (mode, val));
	}
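      /* For example, outside a memory address (mult x (const_int 8))
	 becomes (ashift x (const_int 3)).  */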
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	  if (real_equal (d1, &dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && real_equal (d1, &dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
	}
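      /* For example, in SImode (ior (and x (const_int 0xff00ffff))
	 (const_int 0x00ff0000)) becomes (ior x (const_int 0x00ff0000)),
	 since C1|C2 covers the whole mode mask.  */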
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_UNIT_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
				     &inner_mode)
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
	  && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
	      + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));
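      /* For example, in SImode (ior (ashift x (const_int 24))
	 (lshiftrt x (const_int 8))) becomes (rotate x (const_int 24)),
	 since the shift counts sum to the precision.  */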
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      /* The following happens with bitfield merging.
	 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
      if (GET_CODE (op0) == AND
	  && GET_CODE (op1) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (XEXP (op1, 1))
	  && (INTVAL (XEXP (op0, 1))
	      == ~INTVAL (XEXP (op1, 1))))
	{
	  /* The IOR may be on both sides.  */
	  rtx top0 = NULL_RTX, top1 = NULL_RTX;
	  if (GET_CODE (XEXP (op1, 0)) == IOR)
	    top0 = op0, top1 = op1;
	  else if (GET_CODE (XEXP (op0, 0)) == IOR)
	    top0 = op1, top1 = op0;
	  if (top0 && top1)
	    {
	      /* X may be on either side of the inner IOR.  */
	      rtx tem = NULL_RTX;
	      if (rtx_equal_p (XEXP (top0, 0),
			       XEXP (XEXP (top1, 0), 0)))
		tem = XEXP (XEXP (top1, 0), 1);
	      else if (rtx_equal_p (XEXP (top0, 0),
				    XEXP (XEXP (top1, 0), 1)))
		tem = XEXP (XEXP (top1, 0), 0);
	      if (tem)
		return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
					    simplify_gen_binary
					      (AND, mode, tem,
					       XEXP (top1, 1)));
	    }
	}

      /* Convert (ior (and A C) (and B C)) into (and (ior A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == IOR
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_logical_relational_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode,
							  op0, op1),
				     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);
      /* Given (xor (ior (xor A B) C) D), where B, C and D are
	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
	 out bits inverted twice and not set by C.  Similarly, given
	 (xor (and (xor A B) C) D), simplify without inverting C in
	 the xor operand: (xor (and A C) (B&C)^D).  */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (op1)
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
	{
	  enum rtx_code op = GET_CODE (op0);
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx d = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);
	  HOST_WIDE_INT dval = INTVAL (d);
	  HOST_WIDE_INT xcval;

	  if (op == IOR)
	    xcval = ~cval;
	  else
	    xcval = cval;

	  return simplify_gen_binary (XOR, mode,
				      simplify_gen_binary (op, mode, a, c),
				      gen_int_mode ((bval & xcval) ^ dval,
						    mode));
	}

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  /* Instead of computing ~A&C, we compute its negated value,
	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
						  GEN_INT (~cval));

	  if ((~cval & bval) == 0)
	    {
	      rtx na_c = NULL_RTX;
	      if (n_na_c)
		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
	      else
		{
		  /* If ~A does not simplify, don't bother: we don't
		     want to simplify 2 operations into 3, and if na_c
		     were to simplify with na, n_na_c would have
		     simplified as well.  */
		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
		  if (na)
		    na_c = simplify_gen_binary (AND, mode, na, c);
		}

	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval,
							  mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (n_na_c == CONSTM1_RTX (mode))
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}
      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
	 do (ior (and A ~C) (and B C)) which is a machine instruction on some
	 machines, and also has shorter instruction path length.  */
      if (GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && CONST_INT_P (XEXP (op0, 1))
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
	{
	  rtx a = trueop1;
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
	  rtx bc = simplify_gen_binary (AND, mode, b, c);
	  return simplify_gen_binary (IOR, mode, a_nc, bc);
	}
      /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
      else if (GET_CODE (op0) == AND
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (XEXP (op0, 1))
	       && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
	{
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = trueop1;
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
	  rtx ac = simplify_gen_binary (AND, mode, a, c);
	  return simplify_gen_binary (IOR, mode, ac, b_nc);
	}
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, int_mode)))
	return reversed;

      /* Convert (xor (and A C) (and B C)) into (and (xor A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);
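      /* For example, (and (zero_extend:SI (reg:QI r)) (const_int 255))
	 returns the ZERO_EXTEND unchanged, because no nonzero bit of the
	 operand lies outside the mask.  */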
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0),
							   op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X))) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 1)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 1)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C).  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && (GET_CODE (op0) == AND
	      || GET_CODE (op0) == IOR
	      || GET_CODE (op0) == LSHIFTRT
	      || GET_CODE (op0) == ASHIFTRT
	      || GET_CODE (op0) == ASHIFT
	      || GET_CODE (op0) == ROTATE
	      || GET_CODE (op0) == ROTATERT))
	{
	  tem = simplify_distributive_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode)
	  && !cfun->can_throw_non_call_exceptions)
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0,
				    gen_int_shift_amount (mode, val));
      break;
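      /* For example, (udiv x (const_int 16)) becomes
	 (lshiftrt x (const_int 4)).  */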
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	      /* x/-1.0 is -x.  */
	      if (real_equal (d1, &dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !real_equal (d1, &dconst0))
		{
		  REAL_VALUE_TYPE d;
		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
		  tem = const_double_from_real_value (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (tem)
		return tem;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (x)
		return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;
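      /* With -freciprocal-math, (div:DF x (const_double 4.0)) becomes
	 (mult:DF x (const_double 0.25)); 1/4 is exactly representable,
	 so nothing is lost in this particular case.  */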
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (UINTVAL (trueop1) - 1,
						  mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
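      /* For example, (umod x (const_int 8)) becomes
	 (and x (const_int 7)).  */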
    case ROTATERT:
    case ROTATE:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_UNIT_PRECISION (mode) - 1))
	{
	  int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
	  rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
	  return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				      mode, op0, new_amount_rtx);
	}
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;
      /* Given:
	 scalar modes M1, M2
	 scalar constants c1, c2
	 size (M2) > size (M1)
	 c1 == size (M2) - size (M1)
	 optimize:
	 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
			  <low_part>)
			  (const_int <c2>))
	 to:
	 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
		    <low_part>).  */
      if ((code == ASHIFTRT || code == LSHIFTRT)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && SUBREG_P (op0)
	  && CONST_INT_P (op1)
	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
				     &inner_mode)
	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
	  && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
	      == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
	  && subreg_lowpart_p (op0))
	{
	  rtx tmp = gen_int_shift_amount
	    (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));

	  /* Combine would usually zero out the value when combining two
	     local shifts and the range becomes larger or equal to the mode.
	     However since we fold away one of the shifts here combine won't
	     see it so we should immediately zero the result if it's out of
	     range.  */
	  if (code == LSHIFTRT
	      && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
	    tmp = const0_rtx;
	  else
	    tmp = simplify_gen_binary (code,
				       inner_mode,
				       XEXP (SUBREG_REG (op0), 0),
				       tmp);

	  return lowpart_subreg (int_mode, tmp, inner_mode);
	}

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0,
					gen_int_shift_amount (mode, val));
	}
      break;
	case ASHIFT:
	case SS_ASHIFT:
	case US_ASHIFT:
	  if (trueop1 == CONST0_RTX (mode))
	    return op0;
	  if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	    return op0;
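	  /* Inside a MEM, a multiplication by a power of two is the
	     canonical form of this shift: for example, in an address
	     (ashift:SI x (const_int 2)) is written as
	     (mult:SI x (const_int 4)).  */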
	  if (mem_depth
	      && code == ASHIFT
	      && CONST_INT_P (trueop1)
	      && is_a <scalar_int_mode> (mode, &int_mode)
	      && IN_RANGE (UINTVAL (trueop1),
			   1, GET_MODE_PRECISION (int_mode) - 1))
	    {
	      auto c = (wi::one (GET_MODE_PRECISION (int_mode))
			<< UINTVAL (trueop1));
	      rtx new_op1 = immed_wide_int_const (c, int_mode);
	      return simplify_gen_binary (MULT, int_mode, op0, new_op1);
	    }
	  goto canonicalize_shift;
	case LSHIFTRT:
	  if (trueop1 == CONST0_RTX (mode))
	    return op0;
	  if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	    return op0;
	  /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
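	  /* For example, on a target where CLZ_DEFINED_VALUE_AT_ZERO is 32
	     for SImode, (clz:SI x) is in [0, 31] for nonzero x, so bit 5 of
	     the result is set exactly when x is zero; shifting right by 5
	     therefore yields (eq x 0) when STORE_FLAG_VALUE is 1.  */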
	  if (GET_CODE (op0) == CLZ
	      && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
	      && CONST_INT_P (trueop1)
	      && STORE_FLAG_VALUE == 1
	      && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
	    {
	      unsigned HOST_WIDE_INT zero_val = 0;

	      if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
		  && zero_val == GET_MODE_PRECISION (inner_mode)
		  && INTVAL (trueop1) == exact_log2 (zero_val))
		return simplify_gen_relational (EQ, mode, inner_mode,
						XEXP (op0, 0), const0_rtx);
	    }
	  goto canonicalize_shift;
	case SMIN:
	  if (HWI_COMPUTABLE_MODE_P (mode)
	      && mode_signbit_p (mode, trueop1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SMAX:
	  if (HWI_COMPUTABLE_MODE_P (mode)
	      && CONST_INT_P (trueop1)
	      && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMIN:
	  if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	case SS_MULT:
	case US_MULT:
	case SS_DIV:
	case US_DIV:
	  /* ??? There are simplifications that can be done.  */
	  return 0;
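	  /* A VEC_SERIES gives element i the value base + i * step; for
	     example, (vec_series:V4SI (const_int 1) (const_int 2))
	     represents {1, 3, 5, 7}, and a zero step is a duplicate.  */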
	case VEC_SERIES:
	  if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
	    return gen_vec_duplicate (mode, op0);
	  if (valid_for_const_vector_p (mode, op0)
	      && valid_for_const_vector_p (mode, op1))
	    return gen_const_vec_series (mode, op0, op1);
	  return 0;
	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	      gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	      gcc_assert (GET_CODE (trueop1) == PARALLEL);
	      gcc_assert (XVECLEN (trueop1, 0) == 1);

	      /* We can't reason about selections made at runtime.  */
	      if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
		return 0;

	      if (vec_duplicate_p (trueop0, &elt0))
		return elt0;

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
							  (trueop1, 0, 0)));

	      /* Extract a scalar element from a nested VEC_SELECT expression
		 (with optional nested VEC_CONCAT expression).  Some targets
		 (i386) extract scalar element from a vector using chain of
		 nested VEC_SELECT expressions.  When input operand is a memory
		 operand, this operation can be simplified to a simple scalar
		 load from an offset memory address.  */
	      int n_elts;
	      if (GET_CODE (trueop0) == VEC_SELECT
		  && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
		      .is_constant (&n_elts)))
		{
		  rtx op0 = XEXP (trueop0, 0);
		  rtx op1 = XEXP (trueop0, 1);

		  int i = INTVAL (XVECEXP (trueop1, 0, 0));
		  int elem;

		  rtvec vec;
		  rtx tmp_op, tmp;

		  gcc_assert (GET_CODE (op1) == PARALLEL);
		  gcc_assert (i < n_elts);

		  /* Select element, pointed by nested selector.  */
		  elem = INTVAL (XVECEXP (op1, 0, i));

		  /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
		  if (GET_CODE (op0) == VEC_CONCAT)
		    {
		      rtx op00 = XEXP (op0, 0);
		      rtx op01 = XEXP (op0, 1);

		      machine_mode mode00, mode01;
		      int n_elts00, n_elts01;

		      mode00 = GET_MODE (op00);
		      mode01 = GET_MODE (op01);

		      /* Find out the number of elements of each operand.
			 Since the concatenated result has a constant number
			 of elements, the operands must too.  */
		      n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
		      n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();

		      gcc_assert (n_elts == n_elts00 + n_elts01);

		      /* Select correct operand of VEC_CONCAT
			 and adjust selector.  */
		      if (elem < n_elts01)
			tmp_op = op00;
		      else
			{
			  tmp_op = op01;
			  elem -= n_elts00;
			}
		    }
		  else
		    tmp_op = op0;

		  vec = rtvec_alloc (1);
		  RTVEC_ELT (vec, 0) = GEN_INT (elem);

		  tmp = gen_rtx_fmt_ee (code, mode,
					tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
		  return tmp;
		}
	    }
	  else
	    {
	      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	      gcc_assert (GET_MODE_INNER (mode)
			  == GET_MODE_INNER (GET_MODE (trueop0)));
	      gcc_assert (GET_CODE (trueop1) == PARALLEL);

	      if (vec_duplicate_p (trueop0, &elt0))
		/* It doesn't matter which elements are selected by trueop1,
		   because they are all the same.  */
		return gen_vec_duplicate (mode, elt0);

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  unsigned n_elts = XVECLEN (trueop1, 0);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      if (!CONST_INT_P (x))
			return 0;

		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
							   INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	      /* Recognize the identity.  */
	      if (GET_MODE (trueop0) == mode)
		{
		  bool maybe_ident = true;
		  for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i)
			{
			  maybe_ident = false;
			  break;
			}
		    }
		  if (maybe_ident)
		    return trueop0;
		}
	      /* If we build {a,b} then permute it, build the result directly.  */
	      if (XVECLEN (trueop1, 0) == 2
		  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
		  && CONST_INT_P (XVECEXP (trueop1, 0, 1))
		  && GET_CODE (trueop0) == VEC_CONCAT
		  && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
		  && GET_MODE (XEXP (trueop0, 0)) == mode
		  && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
		  && GET_MODE (XEXP (trueop0, 1)) == mode)
		{
		  unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
		  unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
		  rtx subop0, subop1;

		  gcc_assert (i0 < 4 && i1 < 4);
		  subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
		  subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

		  return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
		}
	      if (XVECLEN (trueop1, 0) == 2
		  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
		  && CONST_INT_P (XVECEXP (trueop1, 0, 1))
		  && GET_CODE (trueop0) == VEC_CONCAT
		  && GET_MODE (trueop0) == mode)
		{
		  unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
		  unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
		  rtx subop0, subop1;

		  gcc_assert (i0 < 2 && i1 < 2);
		  subop0 = XEXP (trueop0, i0);
		  subop1 = XEXP (trueop0, i1);

		  return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
		}
	      /* If we select one half of a vec_concat, return that.  */
	      int l0, l1;
	      if (GET_CODE (trueop0) == VEC_CONCAT
		  && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
		      .is_constant (&l0))
		  && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
		      .is_constant (&l1))
		  && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
		{
		  rtx subop0 = XEXP (trueop0, 0);
		  rtx subop1 = XEXP (trueop0, 1);
		  machine_mode mode0 = GET_MODE (subop0);
		  machine_mode mode1 = GET_MODE (subop1);
		  int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
		  if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
		    {
		      bool success = true;
		      for (int i = 1; i < l0; ++i)
			{
			  rtx j = XVECEXP (trueop1, 0, i);
			  if (!CONST_INT_P (j) || INTVAL (j) != i)
			    {
			      success = false;
			      break;
			    }
			}
		      if (success)
			return subop0;
		    }
		  if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
		    {
		      bool success = true;
		      for (int i = 1; i < l1; ++i)
			{
			  rtx j = XVECEXP (trueop1, 0, i);
			  if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
			    {
			      success = false;
			      break;
			    }
			}
		      if (success)
			return subop1;
		    }
		}
	      /* Simplify vec_select of a subreg of X to just a vec_select of X
		 when X has same component mode as vec_select.  */
	      unsigned HOST_WIDE_INT subreg_offset = 0;
	      if (GET_CODE (trueop0) == SUBREG
		  && GET_MODE_INNER (mode)
		     == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0)))
		  && GET_MODE_NUNITS (mode).is_constant (&l1)
		  && constant_multiple_p (subreg_memory_offset (trueop0),
					  GET_MODE_UNIT_BITSIZE (mode),
					  &subreg_offset))
		{
		  poly_uint64 nunits
		    = GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0)));
		  bool success = true;
		  for (int i = 0; i != l1; i++)
		    {
		      rtx idx = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (idx)
			  || maybe_ge (UINTVAL (idx) + subreg_offset, nunits))
			{
			  success = false;
			  break;
			}
		    }

		  if (success)
		    {
		      rtx par = trueop1;
		      if (subreg_offset)
			{
			  rtvec vec = rtvec_alloc (l1);
			  for (int i = 0; i < l1; i++)
			    RTVEC_ELT (vec, i)
			      = GEN_INT (INTVAL (XVECEXP (trueop1, 0, i))
					 + subreg_offset);
			  par = gen_rtx_PARALLEL (VOIDmode, vec);
			}
		      return gen_rtx_VEC_SELECT (mode, SUBREG_REG (trueop0), par);
		    }
		}
	    }
	  if (XVECLEN (trueop1, 0) == 1
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && GET_CODE (trueop0) == VEC_CONCAT)
	    {
	      rtx vec = trueop0;
	      offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	      /* Try to find the element in the VEC_CONCAT.  */
	      while (GET_MODE (vec) != mode
		     && GET_CODE (vec) == VEC_CONCAT)
		{
		  poly_int64 vec_size;

		  if (CONST_INT_P (XEXP (vec, 0)))
		    {
		      /* vec_concat of two const_ints doesn't make sense with
			 respect to modes.  */
		      if (CONST_INT_P (XEXP (vec, 1)))
			return 0;

		      vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
				 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
		    }
		  else
		    vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

		  if (known_lt (offset, vec_size))
		    vec = XEXP (vec, 0);
		  else if (known_ge (offset, vec_size))
		    {
		      offset -= vec_size;
		      vec = XEXP (vec, 1);
		    }
		  else
		    break;
		  vec = avoid_constant_pool_reference (vec);
		}

	      if (GET_MODE (vec) == mode)
		return vec;
	    }
	  /* If we select elements in a vec_merge that all come from the same
	     operand, select from that operand directly.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (trueop02))
		{
		  unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
		  bool all_operand0 = true;
		  bool all_operand1 = true;
		  for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
			all_operand1 = false;
		      else
			all_operand0 = false;
		    }
		  if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
		  if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
		}
	    }
	  /* If we have two nested selects that are inverses of each
	     other, replace them with the source operand.  */
	  if (GET_CODE (trueop0) == VEC_SELECT
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    {
	      rtx op0_subop1 = XEXP (trueop0, 1);
	      gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
	      gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));

	      /* Apply the outer ordering vector to the inner one.  (The inner
		 ordering vector is expressly permitted to be of a different
		 length than the outer one.)  If the result is { 0, 1, ..., n-1 }
		 then the two VEC_SELECTs cancel.  */
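	      /* For example, an outer selector { 1, 0 } applied to an inner
		 selector { 1, 0 } composes to { 0, 1 }: the two element
		 swaps cancel and the source vector can be used directly.  */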
	      for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
		{
		  rtx x = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (x))
		    return 0;
		  rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
		  if (!CONST_INT_P (y) || i != INTVAL (y))
		    return 0;
		}
	      return XEXP (trueop0, 0);
	    }

	  return 0;
	case VEC_CONCAT:
	  {
	    machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				     ? GET_MODE (trueop0)
				     : GET_MODE_INNER (mode));
	    machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				     ? GET_MODE (trueop1)
				     : GET_MODE_INNER (mode));

	    gcc_assert (VECTOR_MODE_P (mode));
	    gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
				  + GET_MODE_SIZE (op1_mode),
				  GET_MODE_SIZE (mode)));

	    if (VECTOR_MODE_P (op0_mode))
	      gcc_assert (GET_MODE_INNER (mode)
			  == GET_MODE_INNER (op0_mode));
	    else
	      gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	    if (VECTOR_MODE_P (op1_mode))
	      gcc_assert (GET_MODE_INNER (mode)
			  == GET_MODE_INNER (op1_mode));
	    else
	      gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	    unsigned int n_elts, in_n_elts;
	    if ((GET_CODE (trueop0) == CONST_VECTOR
		 || CONST_SCALAR_INT_P (trueop0)
		 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
		&& (GET_CODE (trueop1) == CONST_VECTOR
		    || CONST_SCALAR_INT_P (trueop1)
		    || CONST_DOUBLE_AS_FLOAT_P (trueop1))
		&& GET_MODE_NUNITS (mode).is_constant (&n_elts)
		&& GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
	      {
		rtvec v = rtvec_alloc (n_elts);
		unsigned int i;
		for (i = 0; i < n_elts; i++)
		  {
		    if (i < in_n_elts)
		      {
			if (!VECTOR_MODE_P (op0_mode))
			  RTVEC_ELT (v, i) = trueop0;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		      }
		    else
		      {
			if (!VECTOR_MODE_P (op1_mode))
			  RTVEC_ELT (v, i) = trueop1;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							       i - in_n_elts);
		      }
		  }

		return gen_rtx_CONST_VECTOR (mode, v);
	      }
	    /* Try to merge two VEC_SELECTs from the same vector into a single one.
	       Restrict the transformation to avoid generating a VEC_SELECT with a
	       mode unrelated to its operand.  */
	    if (GET_CODE (trueop0) == VEC_SELECT
		&& GET_CODE (trueop1) == VEC_SELECT
		&& rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
		&& GET_MODE (XEXP (trueop0, 0)) == mode)
	      {
		rtx par0 = XEXP (trueop0, 1);
		rtx par1 = XEXP (trueop1, 1);
		int len0 = XVECLEN (par0, 0);
		int len1 = XVECLEN (par1, 0);
		rtvec vec = rtvec_alloc (len0 + len1);
		for (int i = 0; i < len0; i++)
		  RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
		for (int i = 0; i < len1; i++)
		  RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
		return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					    gen_rtx_PARALLEL (VOIDmode, vec));
	      }
	    break;
	  }
	default:
	  break;
	}

  if (mode == GET_MODE (op0)
      && mode == GET_MODE (op1)
      && vec_duplicate_p (op0, &elt0)
      && vec_duplicate_p (op1, &elt1))
    {
      /* Try applying the operator to ELT and see if that simplifies.
	 We can duplicate the result if so.

	 The reason we don't use simplify_gen_binary is that it isn't
	 necessarily a win to convert things like:

	   (plus:V (vec_duplicate:V (reg:S R1))
		   (vec_duplicate:V (reg:S R2)))

	 to:

	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))

	 The first might be done entirely in vector registers while the
	 second might need a move between register files.  */
      tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
				       elt0, elt1);
      if (tem)
	return gen_vec_duplicate (mode, tem);
    }

  return 0;
}
/* Return true if binary operation OP distributes over addition in operand
   OPNO, with the other operand being held constant.  OPNO counts from 1.  */

static bool
distributes_over_addition_p (rtx_code op, int opno)
{
  switch (op)
    {
    case PLUS:
    case MINUS:
    case MULT:
      return true;

    case ASHIFT:
      return opno == 1;

    default:
      return false;
    }
}
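/* For example, (x + y) * c == x * c + y * c and (x + y) << c
   == (x << c) + (y << c), whereas ASHIFT does not distribute through
   its second operand: x << (c1 + c2) != (x << c1) + (x << c2).  */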
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      bool step_ok_p;
      if (CONST_VECTOR_STEPPED_P (op0)
	  && CONST_VECTOR_STEPPED_P (op1))
	/* We can operate directly on the encoding if:

	      a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
	    implies
	      (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)

	   Addition and subtraction are the supported operators
	   for which this is true.  */
	step_ok_p = (code == PLUS || code == MINUS);
      else if (CONST_VECTOR_STEPPED_P (op0))
	/* We can operate directly on stepped encodings if:

	     a3 - a2 == a2 - a1
	   implies:
	     (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)

	   which is true if (x -> x op c) distributes over addition.  */
	step_ok_p = distributes_over_addition_p (code, 1);
      else
	/* Similarly in reverse.  */
	step_ok_p = distributes_over_addition_p (code, 2);
      rtx_vector_builder builder;
      if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
	return 0;

      unsigned int count = builder.encoded_nelts ();
      for (unsigned int i = 0; i < count; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x || !valid_for_const_vector_p (mode, x))
	    return 0;
	  builder.quick_push (x);
	}
      return builder.build ();
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || CONST_FIXED_P (op0)
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || CONST_FIXED_P (op1)))
    {
      /* Both inputs have a constant number of elements, so the result
	 must too.  */
      unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = CONST_VECTOR_ELT (op1, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return const_double_from_real_value (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  const REAL_VALUE_TYPE *opr0, *opr1;
	  bool inexact;

	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
		  || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
	    return 0;

	  real_convert (&f0, mode, opr0);
	  real_convert (&f1, mode, opr1);

	  if (code == DIV
	      && real_equal (&f1, &dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && real_equal (&f0, &dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */
	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */
	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return const_double_from_real_value (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
    {
      wide_int result;
      wi::overflow_type overflow;
      rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
      rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something and so if you added this to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
	    else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, int_mode);
    }
  /* Handle polynomial integers.  */
  if (NUM_POLY_INT_COEFFS > 1
      && is_a <scalar_int_mode> (mode, &int_mode)
      && poly_int_rtx_p (op0)
      && poly_int_rtx_p (op1))
    {
      poly_wide_int result;
      switch (code)
	{
	case PLUS:
	  result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
	  break;

	case MINUS:
	  result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
	  break;

	case MULT:
	  if (CONST_SCALAR_INT_P (op1))
	    result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
	  else
	    return NULL_RTX;
	  break;

	case ASHIFT:
	  if (CONST_SCALAR_INT_P (op1))
	    {
	      wide_int shift = rtx_mode_t (op1, mode);
	      if (SHIFT_COUNT_TRUNCATED)
		shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
	      else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
		return NULL_RTX;
	      result = wi::to_poly_wide (op0, mode) << shift;
	    }
	  else
	    return NULL_RTX;
	  break;

	case IOR:
	  if (!CONST_SCALAR_INT_P (op1)
	      || !can_ior_p (wi::to_poly_wide (op0, mode),
			     rtx_mode_t (op1, mode), &result))
	    return NULL_RTX;
	  break;

	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, int_mode);
    }

  return NULL_RTX;
}
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */
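/* For example, (plus (plus (reg A) (const_int 2)) (minus (reg B) (reg A)))
   expands into the operand list {A, 2, B, -A}; A and -A cancel, and the
   result is rebuilt as (plus (reg B) (const_int 2)).  */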
rtx
simplify_context::simplify_plus_minus (rtx_code code, machine_mode mode,
				       rtx op0, rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      /* If this operand was negated then we will potentially
		 canonicalize the expression.  Similarly if we don't
		 place the operands adjacent we're re-ordering the
		 expression and thus might be performing a
		 canonicalization.  Ignore register re-ordering.
		 ??? It might be better to shuffle the ops array here,
		 but then (plus (plus (A, B), plus (C, D))) wouldn't
		 be seen as non-canonical.  */
	      if (this_neg
		  || (i != n_ops - 2
		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
		canonicalized = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    CASE_CONST_SCALAR_INT:
	    case CONST_POLY_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_poly_int_rtx (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int cmp;

	  j = i - 1;
	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
	  if (cmp <= 0)
	    continue;
	  /* Just swapping registers doesn't count as canonicalization.  */
	  if (cmp != 1)
	    canonicalized = 1;

	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (--j >= 0
		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
	  ops[j + 1] = save;
	}
      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (poly_int_rtx_p (tem) && lneg)
		      tem = neg_poly_int_rtx (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  /* If nothing changed, check that rematerialization of rtl instructions
     is still required.  */
  if (!canonicalized)
    {
      /* Perform rematerialization if only all operands are registers and
	 all operations are PLUS.  */
      /* ??? Also disallow (non-global, non-frame) fixed registers to work
	 around rs6000 and how it uses the CA register.  See PR67145.  */
      for (i = 0; i < n_ops; i++)
	if (ops[i].neg
	    || !REG_P (ops[i].op)
	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
		&& fixed_regs[REGNO (ops[i].op)]
		&& !global_regs[REGNO (ops[i].op)]
		&& ops[i].op != frame_pointer_rtx
		&& ops[i].op != arg_pointer_rtx
		&& ops[i].op != stack_pointer_rtx))
	  return NULL_RTX;
      goto gen_result;
    }

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && poly_int_rtx_p (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_poly_int_rtx (mode, value);
      if (CONST_INT_P (value))
	{
	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					     INTVAL (value));
	  n_ops--;
	}
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
 gen_result:
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands must
   not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_context::simplify_relational_operation (rtx_code code,
						 machine_mode mode,
						 machine_mode cmp_mode,
						 rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return const_double_from_real_value (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    return gen_const_vec_duplicate (mode, val);
	  }
#else
	  return NULL_RTX;
#endif
	}
      /* For vector comparison with scalar int result, it is unknown
	 if the target means here a comparison into an integral bitmask,
	 or comparison where all comparisons true mean const_true_rtx
	 whole result, or where any comparisons true mean const_true_rtx
	 whole result.  For const0_rtx all the cases are the same.  */
      if (VECTOR_MODE_P (cmp_mode)
	  && SCALAR_INT_MODE_P (mode)
	  && tem == const_true_rtx)
	return NULL_RTX;

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
rtx
simplify_context::simplify_relational_operation_1 (rtx_code code,
						   machine_mode mode,
						   machine_mode cmp_mode,
						   rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
     transformed into (LTU a -C).  */
  if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
      && CONST_INT_P (XEXP (op0, 1))
      && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational (LTU, mode, cmp_mode,
				      XEXP (op0, 0), new_cmp);
    }
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  scalar_int_mode int_mode, int_cmp_mode;
  if (code == NE
      && op1 == const0_rtx
      && is_int_mode (mode, &int_mode)
      && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && int_mode != BImode
      && int_cmp_mode != BImode
      && nonzero_bits (op0, int_cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
	   : lowpart_subreg (int_mode, op0, int_cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
  /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
     constant folding if x/y is a constant.  */
  if ((code == EQ || code == NE)
      && (op0code == AND || op0code == IOR)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
	 (eq/ne (and (not y) x) 0).  */
      if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
	{
	  rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
					  cmp_mode);
	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
					  CONST0_RTX (cmp_mode));
	}

      /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
	 (eq/ne (and (not x) y) 0).  */
      if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
	{
	  rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
					  cmp_mode);
	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
					  CONST0_RTX (cmp_mode));
	}
    }
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
      rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }

  /* Optimize comparisons with upper and lower bounds.  */
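  /* For example, if nonzero_bits shows that the sign bit of x must be
     clear, then in SImode x lies in [0, 0x7fffffff], so a comparison
     such as (gt:SI x (const_int -1)) is a tautology and folds to
     const_true_rtx.  */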
  scalar_int_mode int_mode;
  if (CONST_INT_P (trueop1)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && HWI_COMPUTABLE_MODE_P (int_mode)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies
		= num_sign_bit_copies (trueop0, int_mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
  /* Optimize integer comparisons with zero.  */
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && trueop1 == const0_rtx
      && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & (HOST_WIDE_INT_1U
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
	    return const0_rtx;
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
	    return const_true_rtx;
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
   can be simplified to that or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */
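/* For example, on a target where CLZ_DEFINED_VALUE_AT_ZERO is 32 for
   SImode, (x == 0 ? 32 : clz (x)) computes clz (x) for every x, so the
   conditional collapses to the bare CLZ.  */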
rtx
simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
					 rtx true_val, rtx false_val)
{
  if (cmp_code != EQ && cmp_code != NE)
    return NULL_RTX;

  /* Result on X == 0 and X != 0 respectively.  */
  rtx on_zero, on_nonzero;
  if (cmp_code == EQ)
    {
      on_zero = true_val;
      on_nonzero = false_val;
    }
  else
    {
      on_zero = false_val;
      on_nonzero = true_val;
    }

  rtx_code op_code = GET_CODE (on_nonzero);
  if ((op_code != CLZ && op_code != CTZ)
      || !rtx_equal_p (XEXP (on_nonzero, 0), x)
      || !CONST_INT_P (on_zero))
    return NULL_RTX;

  HOST_WIDE_INT op_val;
  scalar_int_mode mode ATTRIBUTE_UNUSED
    = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
  if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
       || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
      && op_val == INTVAL (on_zero))
    return on_nonzero;

  return NULL_RTX;
}
/* Try to simplify X given that it appears within operand OP of a
   VEC_MERGE operation whose mask is MASK.  X need not use the same
   vector mode as the VEC_MERGE, but it must have the same number of
   elements.

   Return the simplified X on success, otherwise return NULL_RTX.  */
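/* For example, within operand 0 of (vec_merge a b m), a nested
   (vec_merge x y m) with the same mask m only ever contributes the
   lanes in which m selects operand 0, so it simplifies to x.  */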
rtx
simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
{
  gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
  poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
  if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
    {
      if (side_effects_p (XEXP (x, 1 - op)))
	return NULL_RTX;

      return XEXP (x, op);
    }
  if (UNARY_P (x)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      if (top0)
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
				   GET_MODE (XEXP (x, 0)));
    }
  if (BINARY_P (x)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
      if (top0 || top1)
	{
	  if (COMPARISON_P (x))
	    return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					    GET_MODE (XEXP (x, 0)) != VOIDmode
					    ? GET_MODE (XEXP (x, 0))
					    : GET_MODE (XEXP (x, 1)),
					    top0 ? top0 : XEXP (x, 0),
					    top1 ? top1 : XEXP (x, 1));
	  else
	    return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
					top0 ? top0 : XEXP (x, 0),
					top1 ? top1 : XEXP (x, 1));
	}
    }
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
      && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
    {
      rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
      rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
      rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
      if (top0 || top1 || top2)
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (XEXP (x, 0)),
				     top0 ? top0 : XEXP (x, 0),
				     top1 ? top1 : XEXP (x, 1),
				     top2 ? top2 : XEXP (x, 2));
    }
  return NULL_RTX;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
rtx
simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
					      machine_mode op0_mode,
					      rtx op0, rtx op1, rtx op2)
{
  bool any_change = false;
  rtx tem, trueop2;
  scalar_int_mode int_mode, int_op0_mode;
  unsigned int n_elts;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
	  && HWI_COMPUTABLE_MODE_P (int_mode))
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (!BITS_BIG_ENDIAN)
	    val >>= op2val;
	  else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
	    val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
	  else
	    /* Not enough information to calculate the bit position.  */
	    break;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
		     != 0)
		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
	    }

	  return gen_int_mode (val, int_mode);
	}
      break;
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
6282 /* Convert (!c) != {0,...,0} ? a : b into
6283 c != {0,...,0} ? b : a for vector modes. */
6284 if (VECTOR_MODE_P (GET_MODE (op1
))
6285 && GET_CODE (op0
) == NE
6286 && GET_CODE (XEXP (op0
, 0)) == NOT
6287 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
6289 rtx cv
= XEXP (op0
, 1);
6292 if (!CONST_VECTOR_NUNITS (cv
).is_constant (&nunits
))
6295 for (int i
= 0; i
< nunits
; ++i
)
6296 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
6303 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
6304 XEXP (XEXP (op0
, 0), 0),
6306 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
      /* Convert x == 0 ? N : clz (x) into clz (x) when
	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
	 Similarly for ctz (x).  */
      if (COMPARISON_P (op0) && !side_effects_p (op0)
	  && XEXP (op0, 1) == const0_rtx)
	{
	  rtx simplified
	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
				     op1, op2);
	  if (simplified)
	    return simplified;
	}

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2)
	  && GET_MODE_NUNITS (mode).is_constant (&n_elts))
	{
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }
	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i)))
	     a (const_int 1 << i)) with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	  /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
	     (const_int N))
	     with (vec_concat (X) (B)) if N == 1 or
	     (vec_concat (A) (X)) if N == 2.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == CONST_VECTOR
	      && known_eq (CONST_VECTOR_NUNITS (op1), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }
	  /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z))
	     (const_int N))
	     with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
	     Only applies for vectors of two elements.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == VEC_CONCAT
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = XEXP (op1, 2 - sel);
	      rtx otherop = XEXP (op1, sel - 1);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      /* Don't want to throw away the other part of the vec_concat if
		 it has side-effects.  */
	      if (!side_effects_p (otherop))
		return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }
	  /* Replace:

	      (vec_merge:outer (vec_duplicate:outer x:inner)
			       (subreg:outer y:inner 0)
			       (const_int N))

	     with (vec_concat:outer x:inner y:inner) if N == 1,
	     or (vec_concat:outer y:inner x:inner) if N == 2.

	     Implicitly, this means we have a paradoxical subreg, but such
	     a check is cheap, so make it anyway.

	     Only applies for vectors of two elements.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == SUBREG
	      && GET_MODE (op1) == GET_MODE (op0)
	      && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
	      && paradoxical_subreg_p (op1)
	      && subreg_lowpart_p (op1)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = SUBREG_REG (op1);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }
	  /* Same as above but with switched operands:
	     Replace (vec_merge:outer (subreg:outer x:inner 0)
				      (vec_duplicate:outer y:inner)
			      (const_int N))

	     with (vec_concat:outer x:inner y:inner) if N == 1,
	     or (vec_concat:outer y:inner x:inner) if N == 2.  */
	  if (GET_CODE (op1) == VEC_DUPLICATE
	      && GET_CODE (op0) == SUBREG
	      && GET_MODE (op0) == GET_MODE (op1)
	      && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
	      && paradoxical_subreg_p (op0)
	      && subreg_lowpart_p (op0)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = SUBREG_REG (op0);
	      rtx newop1 = XEXP (op1, 0);
	      if (sel == 2)
		std::swap (newop0, newop1);
	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }
	  /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
	     (const_int n))
	     with (vec_concat x y) or (vec_concat y x) depending on value
	     of N.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (op1) == VEC_DUPLICATE
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
	      && IN_RANGE (sel, 1, 2))
	    {
	      rtx newop0 = XEXP (op0, 0);
	      rtx newop1 = XEXP (op1, 0);
	      if (sel == 2)
		std::swap (newop0, newop1);

	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
	    }
	}
      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      if (!side_effects_p (op2))
	{
	  rtx top0
	    = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
	  rtx top1
	    = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
	  if (top0 || top1)
	    return simplify_gen_ternary (code, mode, mode,
					 top0 ? top0 : op0,
					 top1 ? top1 : op1, op2);
	}

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Try to calculate NUM_BYTES bytes of the target memory image of X,
   starting at byte FIRST_BYTE.  Return true on success and add the
   bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
   that the bytes follow target memory order.  Leave BYTES unmodified
   on failure.

   MODE is the mode of X.  The caller must reserve NUM_BYTES bytes in
   BYTES before calling this function.  */

bool
native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
		   unsigned int first_byte, unsigned int num_bytes)
{
  /* Check the mode is sensible.  */
  gcc_assert (GET_MODE (x) == VOIDmode
	      ? is_a <scalar_int_mode> (mode)
	      : mode == GET_MODE (x));

  if (GET_CODE (x) == CONST_VECTOR)
    {
      /* CONST_VECTOR_ELT follows target memory order, so no shuffling
	 is necessary.  The only complication is that MODE_VECTOR_BOOL
	 vectors can have several elements per byte.  */
      unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
						   GET_MODE_NUNITS (mode));
      unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
      if (elt_bits < BITS_PER_UNIT)
	{
	  /* This is the only case in which elements can be smaller than
	     a byte.  */
	  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
	  for (unsigned int i = 0; i < num_bytes; ++i)
	    {
	      target_unit value = 0;
	      for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
		{
		  value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & 1) << j;
		  elt += 1;
		}
	      bytes.quick_push (value);
	    }
	  return true;
	}

      unsigned int start = bytes.length ();
      unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
      /* Make FIRST_BYTE relative to ELT.  */
      first_byte %= elt_bytes;
      while (num_bytes > 0)
	{
	  /* Work out how many bytes we want from element ELT.  */
	  unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
	  if (!native_encode_rtx (GET_MODE_INNER (mode),
				  CONST_VECTOR_ELT (x, elt), bytes,
				  first_byte, chunk_bytes))
	    {
	      bytes.truncate (start);
	      return false;
	    }
	  elt += 1;
	  first_byte = 0;
	  num_bytes -= chunk_bytes;
	}
      return true;
    }
  /* All subsequent cases are limited to scalars.  */
  scalar_mode smode;
  if (!is_a <scalar_mode> (mode, &smode))
    return false;

  /* Make sure that the region is in range.  */
  unsigned int end_byte = first_byte + num_bytes;
  unsigned int mode_bytes = GET_MODE_SIZE (smode);
  gcc_assert (end_byte <= mode_bytes);

  if (CONST_SCALAR_INT_P (x))
    {
      /* The target memory layout is affected by both BYTES_BIG_ENDIAN
	 and WORDS_BIG_ENDIAN.  Use the subreg machinery to get the lsb
	 position of each byte.  */
      rtx_mode_t value (x, smode);
      wide_int_ref value_wi (value);
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
	{
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
	  /* Operate directly on the encoding rather than using
	     wi::extract_uhwi, so that we preserve the sign or zero
	     extension for modes that are not a whole number of bits in
	     size.  (Zero extension is only used for the combination of
	     innermode == BImode && STORE_FLAG_VALUE == 1).  */
	  unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
	  unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
	  unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
	  bytes.quick_push (uhwi >> shift);
	}
      return true;
    }
  if (CONST_DOUBLE_P (x))
    {
      /* real_to_target produces an array of integers in target memory order.
	 All integers before the last one have 32 bits; the last one may
	 have 32 bits or fewer, depending on whether the mode bitsize
	 is divisible by 32.  Each of these integers is then laid out
	 in target memory as any other integer would be.  */
      long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
      real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);

      /* The (maximum) number of target bytes per element of el32.  */
      unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
      gcc_assert (bytes_per_el32 != 0);

      /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
	 handling above.  */
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
	{
	  unsigned int index = byte / bytes_per_el32;
	  unsigned int subbyte = byte % bytes_per_el32;
	  unsigned int int_bytes = MIN (bytes_per_el32,
					mode_bytes - index * bytes_per_el32);
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
	  bytes.quick_push ((unsigned long) el32[index] >> lsb);
	}
      return true;
    }
  if (GET_CODE (x) == CONST_FIXED)
    {
      for (unsigned int byte = first_byte; byte < end_byte; ++byte)
	{
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
	  unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
	  if (lsb >= HOST_BITS_PER_WIDE_INT)
	    {
	      lsb -= HOST_BITS_PER_WIDE_INT;
	      piece = CONST_FIXED_VALUE_HIGH (x);
	    }
	  bytes.quick_push (piece >> lsb);
	}
      return true;
    }

  return false;
}
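/* Worked example (illustrative only): encoding the SImode constant
   0x01020304 on a little-endian target pushes the bytes
   { 0x04, 0x03, 0x02, 0x01 }, because subreg_size_lsb places byte 0
   at the least significant end; a big-endian target pushes
   { 0x01, 0x02, 0x03, 0x04 } instead.  */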
/* Read a vector of mode MODE from the target memory image given by BYTES,
   starting at byte FIRST_BYTE.  The vector is known to be encodable using
   NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
   and BYTES is known to have enough bytes to supply NPATTERNS *
   NELTS_PER_PATTERN vector elements.  Each element of BYTES contains
   BITS_PER_UNIT bits and the bytes are in target memory order.

   Return the vector on success, otherwise return NULL_RTX.  */

rtx
native_decode_vector_rtx (machine_mode mode, vec<target_unit> bytes,
			  unsigned int first_byte, unsigned int npatterns,
			  unsigned int nelts_per_pattern)
{
  rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);

  unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
					       GET_MODE_NUNITS (mode));
  if (elt_bits < BITS_PER_UNIT)
    {
      /* This is the only case in which elements can be smaller than a byte.
	 Element 0 is always in the lsb of the containing byte.  */
      gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
      for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
	{
	  unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
	  unsigned int byte_index = bit_index / BITS_PER_UNIT;
	  unsigned int lsb = bit_index % BITS_PER_UNIT;
	  builder.quick_push (bytes[byte_index] & (1 << lsb)
			      ? CONST1_RTX (BImode)
			      : CONST0_RTX (BImode));
	}
    }
  else
    {
      for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
	{
	  rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
	  if (!x)
	    return NULL_RTX;
	  builder.quick_push (x);
	  first_byte += elt_bits / BITS_PER_UNIT;
	}
    }
  return builder.build ();
}
/* Read an rtx of mode MODE from the target memory image given by BYTES,
   starting at byte FIRST_BYTE.  Each element of BYTES contains BITS_PER_UNIT
   bits and the bytes are in target memory order.  The image has enough
   values to specify all bytes of MODE.

   Return the rtx on success, otherwise return NULL_RTX.  */

rtx
native_decode_rtx (machine_mode mode, vec<target_unit> bytes,
		   unsigned int first_byte)
{
  if (VECTOR_MODE_P (mode))
    {
      /* If we know at compile time how many elements there are,
	 pull each element directly from BYTES.  */
      unsigned int nelts;
      if (GET_MODE_NUNITS (mode).is_constant (&nelts))
	return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
      return NULL_RTX;
    }

  scalar_int_mode imode;
  if (is_a <scalar_int_mode> (mode, &imode)
      && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
    {
      /* Pull the bytes msb first, so that we can use simple
	 shift-and-insert wide_int operations.  */
      unsigned int size = GET_MODE_SIZE (imode);
      wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
      for (unsigned int i = 0; i < size; ++i)
	{
	  unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
	  /* Always constant because the inputs are.  */
	  unsigned int subbyte
	    = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
	  result <<= BITS_PER_UNIT;
	  result |= bytes[first_byte + subbyte];
	}
      return immed_wide_int_const (result, imode);
    }

  scalar_float_mode fmode;
  if (is_a <scalar_float_mode> (mode, &fmode))
    {
      /* We need to build an array of integers in target memory order.
	 All integers before the last one have 32 bits; the last one may
	 have 32 bits or fewer, depending on whether the mode bitsize
	 is divisible by 32.  */
      long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
      unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
      memset (el32, 0, num_el32 * sizeof (long));

      /* The (maximum) number of target bytes per element of el32.  */
      unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
      gcc_assert (bytes_per_el32 != 0);

      unsigned int mode_bytes = GET_MODE_SIZE (fmode);
      for (unsigned int byte = 0; byte < mode_bytes; ++byte)
	{
	  unsigned int index = byte / bytes_per_el32;
	  unsigned int subbyte = byte % bytes_per_el32;
	  unsigned int int_bytes = MIN (bytes_per_el32,
					mode_bytes - index * bytes_per_el32);
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
	  el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
	}
      REAL_VALUE_TYPE r;
      real_from_target (&r, el32, fmode);
      return const_double_from_real_value (r, fmode);
    }

  if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
    {
      scalar_mode smode = as_a <scalar_mode> (mode);
      FIXED_VALUE_TYPE f;
      f.data.low = 0;
      f.data.high = 0;
      f.mode = smode;

      unsigned int mode_bytes = GET_MODE_SIZE (smode);
      for (unsigned int byte = 0; byte < mode_bytes; ++byte)
	{
	  /* Always constant because the inputs are.  */
	  unsigned int lsb
	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
	  unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
	  if (lsb >= HOST_BITS_PER_WIDE_INT)
	    f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
	  else
	    f.data.low |= unit << lsb;
	}
      return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
    }

  return NULL_RTX;
}
/* Simplify a byte offset BYTE into CONST_VECTOR X.  The main purpose
   is to convert a runtime BYTE value into a constant one.  */

static poly_uint64
simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
{
  /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
  machine_mode mode = GET_MODE (x);
  unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
					       GET_MODE_NUNITS (mode));
  /* The number of bits needed to encode one element from each pattern.  */
  unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;

  /* Identify the start point in terms of a sequence number and a byte offset
     within that sequence.  */
  poly_uint64 first_sequence;
  unsigned HOST_WIDE_INT subbit;
  if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
		       &first_sequence, &subbit))
    {
      unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
      if (nelts_per_pattern == 1)
	/* This is a duplicated vector, so the value of FIRST_SEQUENCE
	   doesn't matter.  */
	byte = subbit / BITS_PER_UNIT;
      else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
	{
	  /* The subreg drops the first element from each pattern and
	     only uses the second element.  Find the first sequence
	     that starts on a byte boundary.  */
	  subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
	  byte = subbit / BITS_PER_UNIT;
	}
    }
  return byte;
}
/* Subroutine of simplify_subreg in which:

   - X is known to be a CONST_VECTOR
   - OUTERMODE is known to be a vector mode

   Try to handle the subreg by operating on the CONST_VECTOR encoding
   rather than on each individual element of the CONST_VECTOR.

   Return the simplified subreg on success, otherwise return NULL_RTX.  */

static rtx
simplify_const_vector_subreg (machine_mode outermode, rtx x,
			      machine_mode innermode, unsigned int first_byte)
{
  /* Paradoxical subregs of vectors have dubious semantics.  */
  if (paradoxical_subreg_p (outermode, innermode))
    return NULL_RTX;

  /* We can only preserve the semantics of a stepped pattern if the new
     vector element is the same as the original one.  */
  if (CONST_VECTOR_STEPPED_P (x)
      && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
    return NULL_RTX;

  /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
  unsigned int x_elt_bits
    = vector_element_size (GET_MODE_BITSIZE (innermode),
			   GET_MODE_NUNITS (innermode));
  unsigned int out_elt_bits
    = vector_element_size (GET_MODE_BITSIZE (outermode),
			   GET_MODE_NUNITS (outermode));

  /* The number of bits needed to encode one element from every pattern
     of the original vector.  */
  unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;

  /* The number of bits needed to encode one element from every pattern
     of the result.  */
  unsigned int out_sequence_bits
    = least_common_multiple (x_sequence_bits, out_elt_bits);

  /* Work out the number of interleaved patterns in the output vector
     and the number of encoded elements per pattern.  */
  unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
  unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);

  /* The encoding scheme requires the number of elements to be a multiple
     of the number of patterns, so that each pattern appears at least once
     and so that the same number of elements appear from each pattern.  */
  bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
  unsigned int const_nunits;
  if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
      && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
    {
      /* Either the encoding is invalid, or applying it would give us
	 more elements than we need.  Just encode each element directly.  */
      out_npatterns = const_nunits;
      nelts_per_pattern = 1;
    }
  else if (!ok_p)
    return NULL_RTX;

  /* Get enough bytes of X to form the new encoding.  */
  unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
  unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
  auto_vec<target_unit, 128> buffer (buffer_bytes);
  if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
    return NULL_RTX;

  /* Reencode the bytes as OUTERMODE.  */
  return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
				   nelts_per_pattern);
}
/* Try to simplify a subreg of a constant by encoding the subreg region
   as a sequence of target bytes and reading them back in the new mode.
   Return the new value on success, otherwise return null.

   The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
   and byte offset FIRST_BYTE.  */

static rtx
simplify_immed_subreg (fixed_size_mode outermode, rtx x,
		       machine_mode innermode, unsigned int first_byte)
{
  unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
  auto_vec<target_unit, 128> buffer (buffer_bytes);

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
    return x;

  /* Paradoxical subregs read undefined values for bytes outside of the
     inner value.  However, we have traditionally always sign-extended
     integer constants and zero-extended others.  */
  unsigned int inner_bytes = buffer_bytes;
  if (paradoxical_subreg_p (outermode, innermode))
    {
      if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
	return NULL_RTX;

      target_unit filler = 0;
      if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
	filler = -1;

      /* Add any leading bytes due to big-endian layout.  The number of
	 bytes must be constant because both modes have constant size.  */
      unsigned int leading_bytes
	= -byte_lowpart_offset (outermode, innermode).to_constant ();
      for (unsigned int i = 0; i < leading_bytes; ++i)
	buffer.quick_push (filler);

      if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
	return NULL_RTX;

      /* Add any trailing bytes due to little-endian layout.  */
      while (buffer.length () < buffer_bytes)
	buffer.quick_push (filler);
    }
  else if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
    return NULL_RTX;
  return native_decode_rtx (outermode, buffer, 0);
}
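/* For example (illustrative only, little-endian): a paradoxical subreg
   such as (subreg:HI (const_int -1:QI) 0) encodes the QImode byte 0xff
   and then pads with the sign filler 0xff, so decoding yields
   (const_int -1) in HImode, matching the traditional sign-extension
   behaviour described above.  */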
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_context::simplify_subreg (machine_mode outermode, rtx op,
				   machine_mode innermode, poly_uint64 byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  poly_uint64 outersize = GET_MODE_SIZE (outermode);
  if (!multiple_p (byte, outersize))
    return NULL_RTX;

  poly_uint64 innersize = GET_MODE_SIZE (innermode);
  if (maybe_ge (byte, innersize))
    return NULL_RTX;

  if (outermode == innermode && known_eq (byte, 0U))
    return op;

  if (GET_CODE (op) == CONST_VECTOR)
    byte = simplify_const_vector_byte_offset (op, byte);

  if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
    {
      rtx elt;

      if (VECTOR_MODE_P (outermode)
	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return gen_vec_duplicate (outermode, elt);

      if (outermode == GET_MODE_INNER (innermode)
	  && vec_duplicate_p (op, &elt))
	return elt;
    }

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || CONST_FIXED_P (op)
      || GET_CODE (op) == CONST_VECTOR)
    {
      unsigned HOST_WIDE_INT cbyte;
      if (byte.is_constant (&cbyte))
	{
	  if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
	    {
	      rtx tmp = simplify_const_vector_subreg (outermode, op,
						      innermode, cbyte);
	      if (tmp)
		return tmp;
	    }

	  fixed_size_mode fs_outermode;
	  if (is_a <fixed_size_mode> (outermode, &fs_outermode))
	    return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
	}
    }
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
      rtx newx;

      if (outermode == innermostmode
	  && known_eq (byte, 0U)
	  && known_eq (SUBREG_BYTE (op), 0))
	return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
	 to the inner value of OP.  */
      poly_int64 mem_offset = subreg_memory_offset (outermode,
						    innermode, byte);
      poly_int64 op_mem_offset = subreg_memory_offset (op);
      poly_int64 final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
	{
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (maybe_lt (final_offset, 0)
	      || maybe_ge (poly_uint64 (final_offset), innermostsize)
	      || !multiple_p (final_offset, outersize))
	    return NULL_RTX;
	}
      else
	{
	  poly_int64 required_offset = subreg_memory_offset (outermode,
							     innermostmode, 0);
	  if (maybe_ne (final_offset, required_offset))
	    return NULL_RTX;
	  /* Paradoxical subregs always have byte offset 0.  */
	  final_offset = 0;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && known_ge (outersize, innersize)
	      && known_le (outersize, innermostsize)
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
				      subreg_memory_offset (outermode,
							    innermode, byte));

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial register anyway.  */

	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && known_le (outersize, innersize))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      poly_uint64 final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      poly_uint64 part_size = GET_MODE_SIZE (part_mode);
      if (known_lt (byte, part_size))
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else if (known_ge (byte, part_size))
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}
      else
	return NULL_RTX;

      if (maybe_gt (final_offset + outersize, part_size))
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Simplify
	(subreg (vec_merge (X)
			   (vector)
			   (const_int ((1 << N) | M)))
		(N * sizeof (outermode)))
     to
	(subreg (X) (N * sizeof (outermode)))
   */
  unsigned int idx;
  if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
      && idx < HOST_BITS_PER_WIDE_INT
      && GET_CODE (op) == VEC_MERGE
      && GET_MODE_INNER (innermode) == outermode
      && CONST_INT_P (XEXP (op, 2))
      && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
    return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
	return CONST0_RTX (outermode);
    }
  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
    {
      /* Handle polynomial integers.  The upper bits of a paradoxical
	 subreg are undefined, so this is safe regardless of whether
	 we're truncating or extending.  */
      if (CONST_POLY_INT_P (op))
	{
	  poly_wide_int val
	    = poly_wide_int::from (const_poly_int_value (op),
				   GET_MODE_PRECISION (int_outermode),
				   SIGNED);
	  return immed_wide_int_const (val, int_outermode);
	}

      if (GET_MODE_PRECISION (int_outermode)
	  < GET_MODE_PRECISION (int_innermode))
	{
	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
	  if (tem)
	    return tem;
	}
    }

  /* If OP is a vector comparison and the subreg is not changing the
     number of elements or the size of the elements, change the result
     of the comparison to the new mode.  */
  if (COMPARISON_P (op)
      && VECTOR_MODE_P (outermode)
      && VECTOR_MODE_P (innermode)
      && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
      && known_eq (GET_MODE_UNIT_SIZE (outermode),
		   GET_MODE_UNIT_SIZE (innermode)))
    return simplify_gen_relational (GET_CODE (op), outermode, innermode,
				    XEXP (op, 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
				       machine_mode innermode,
				       poly_uint64 byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
				  machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
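/* Usage sketch (illustrative only; REG is a hypothetical SImode value):

     rtx low = lowpart_subreg (QImode, reg, SImode);

   subreg_lowpart_offset picks the byte offset that addresses the least
   significant part on both endiannesses, so callers need not special-case
   big-endian targets.  */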
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */
    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}
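/* A minimal caller sketch (illustrative only):

     rtx tem = simplify_rtx (x);
     if (tem)
       x = tem;

   A null return means "no simplification found", not failure, so the
   caller keeps the original expression in that case.  */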
#if CHECKING_P

namespace selftest {

/* Make a unique pseudo REG of mode MODE for use by selftests.  */

static rtx
make_test_reg (machine_mode mode)
{
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}
static void
test_scalar_int_ops (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx six = GEN_INT (6);

  rtx neg_op0 = simplify_gen_unary (NEG, mode, op0, mode);
  rtx not_op0 = simplify_gen_unary (NOT, mode, op0, mode);
  rtx bswap_op0 = simplify_gen_unary (BSWAP, mode, op0, mode);

  rtx and_op0_op1 = simplify_gen_binary (AND, mode, op0, op1);
  rtx ior_op0_op1 = simplify_gen_binary (IOR, mode, op0, op1);
  rtx xor_op0_op1 = simplify_gen_binary (XOR, mode, op0, op1);

  rtx and_op0_6 = simplify_gen_binary (AND, mode, op0, six);
  rtx and_op1_6 = simplify_gen_binary (AND, mode, op1, six);

  /* Test some binary identities.  */
  ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, const0_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (MINUS, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, op0, const1_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, const1_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (DIV, mode, op0, const1_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, constm1_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, constm1_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, const0_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, const0_rtx, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFT, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATE, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFTRT, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (LSHIFTRT, mode, op0, const0_rtx));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATERT, mode, op0, const0_rtx));

  /* Test some self-inverse operations.  */
  ASSERT_RTX_EQ (op0, simplify_gen_unary (NEG, mode, neg_op0, mode));
  ASSERT_RTX_EQ (op0, simplify_gen_unary (NOT, mode, not_op0, mode));
  ASSERT_RTX_EQ (op0, simplify_gen_unary (BSWAP, mode, bswap_op0, mode));

  /* Test some reflexive operations.  */
  ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (SMIN, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (SMAX, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (UMIN, mode, op0, op0));
  ASSERT_RTX_EQ (op0, simplify_gen_binary (UMAX, mode, op0, op0));

  ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (MINUS, mode, op0, op0));
  ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (XOR, mode, op0, op0));

  /* Test simplify_distributive_operation.  */
  ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, xor_op0_op1, six),
		 simplify_gen_binary (XOR, mode, and_op0_6, and_op1_6));
  ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, ior_op0_op1, six),
		 simplify_gen_binary (IOR, mode, and_op0_6, and_op1_6));
  ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, and_op0_op1, six),
		 simplify_gen_binary (AND, mode, and_op0_6, and_op1_6));
}
/* Verify some simplifications involving scalar expressions.  */

static void
test_scalar_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (SCALAR_INT_MODE_P (mode) && mode != BImode)
	test_scalar_int_ops (mode);
    }
}
/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  poly_uint64 nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NOT, mode,
					       duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NEG, mode,
					       duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (PLUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (MINUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
			 simplify_binary_operation (MINUS, mode, duplicate,
						    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
		     simplify_binary_operation (VEC_SELECT, inner_mode,
						duplicate, zero_par));

  unsigned HOST_WIDE_INT const_nunits;
  if (nunits.is_constant (&const_nunits))
    {
      /* And again with the final element.  */
      rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
      rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
      ASSERT_RTX_PTR_EQ (scalar_reg,
			 simplify_binary_operation (VEC_SELECT, inner_mode,
						    duplicate, last_par));

      /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE.  */
      rtx vector_reg = make_test_reg (mode);
      for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
	{
	  if (i >= HOST_BITS_PER_WIDE_INT)
	    break;
	  rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
	  rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
	  poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
	  ASSERT_RTX_EQ (scalar_reg,
			 simplify_gen_subreg (inner_mode, vm,
					      mode, offset));
	}
    }

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
		 simplify_gen_subreg (inner_mode, duplicate,
				      mode, offset));

  machine_mode narrower_mode;
  if (maybe_ne (nunits, 2U)
      && multiple_p (nunits, 2)
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_DUPLICATE of a vector.  */
      rtx_vector_builder nbuilder (narrower_mode, 2, 1);
      nbuilder.quick_push (const0_rtx);
      nbuilder.quick_push (const1_rtx);
      rtx_vector_builder builder (mode, 2, 1);
      builder.quick_push (const0_rtx);
      builder.quick_push (const1_rtx);
      ASSERT_RTX_EQ (builder.build (),
		     simplify_unary_operation (VEC_DUPLICATE, mode,
					       nbuilder.build (),
					       narrower_mode));

      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_binary_operation (VEC_SELECT, narrower_mode,
						duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_gen_subreg (narrower_mode, duplicate,
					  mode, offset));
    }
}
/* Test vector simplifications involving VEC_SERIES in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_series (machine_mode mode, rtx scalar_reg)
{
  /* Test unary cases with VEC_SERIES arguments.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
  rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
  rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
  rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
  rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
  rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
  rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
					 neg_scalar_reg);
  ASSERT_RTX_EQ (series_0_r,
		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
  ASSERT_RTX_EQ (series_r_r,
		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));

  /* Test that a VEC_SERIES with a zero step is simplified away.  */
  ASSERT_RTX_EQ (duplicate,
		 simplify_binary_operation (VEC_SERIES, mode,
					    scalar_reg, const0_rtx));

  /* Test PLUS and MINUS with VEC_SERIES.  */
  rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
  rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
  rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
  ASSERT_RTX_EQ (series_r_r,
		 simplify_binary_operation (PLUS, mode, series_0_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_r,
		 simplify_binary_operation (MINUS, mode, series_r_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
					    constm1_rtx));

  /* Test NEG on constant vector series.  */
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_unary_operation (NEG, mode, series_0_1, mode));
  ASSERT_RTX_EQ (series_0_1,
		 simplify_unary_operation (NEG, mode, series_0_m1, mode));

  /* Test PLUS and MINUS on constant vector series.  */
  rtx scalar2 = gen_int_mode (2, inner_mode);
  rtx scalar3 = gen_int_mode (3, inner_mode);
  rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
  rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
  rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (PLUS, mode, series_0_1,
					    CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_0_m1,
		 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
					    series_0_m1));
  ASSERT_RTX_EQ (series_1_3,
		 simplify_binary_operation (PLUS, mode, series_1_1,
					    series_0_2));
  ASSERT_RTX_EQ (series_0_1,
		 simplify_binary_operation (MINUS, mode, series_1_1,
					    CONST1_RTX (mode)));
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
					    series_0_m1));
  ASSERT_RTX_EQ (series_1_1,
		 simplify_binary_operation (MINUS, mode, series_1_3,
					    series_0_2));

  /* Test MULT between constant vectors.  */
  rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
  rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
  rtx scalar9 = gen_int_mode (9, inner_mode);
  rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
  ASSERT_RTX_EQ (series_0_2,
		 simplify_binary_operation (MULT, mode, series_0_1, vec2));
  ASSERT_RTX_EQ (series_3_9,
		 simplify_binary_operation (MULT, mode, vec3, series_1_3));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
					     series_0_1));

  /* Test ASHIFT between constant vectors.  */
  ASSERT_RTX_EQ (series_0_2,
		 simplify_binary_operation (ASHIFT, mode, series_0_1,
					    CONST1_RTX (mode)));
  if (!GET_MODE_NUNITS (mode).is_constant ())
    ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
					     series_0_1));
}
static rtx
simplify_merge_mask (rtx x, rtx mask, int op)
{
  return simplify_context ().simplify_merge_mask (x, mask, op);
}
/* Verify simplify_merge_mask works correctly.  */

static void
test_vec_merge (machine_mode mode)
{
  rtx op0 = make_test_reg (mode);
  rtx op1 = make_test_reg (mode);
  rtx op2 = make_test_reg (mode);
  rtx op3 = make_test_reg (mode);
  rtx op4 = make_test_reg (mode);
  rtx op5 = make_test_reg (mode);
  rtx mask1 = make_test_reg (SImode);
  rtx mask2 = make_test_reg (SImode);
  rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
  rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
  rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);

  /* Simple vec_merge.  */
  ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
  ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));

  /* Nested vec_merge.
     It's tempting to make this simplify right down to opN, but we don't
     because all the simplify_* functions assume that the operands have
     already been simplified.  */
  rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
  ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
  ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));

  /* Intermediate unary op.  */
  rtx unop = gen_rtx_NOT (mode, vm1);
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
		 simplify_merge_mask (unop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
		 simplify_merge_mask (unop, mask1, 1));

  /* Intermediate binary op.  */
  rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
		 simplify_merge_mask (binop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
		 simplify_merge_mask (binop, mask1, 1));

  /* Intermediate ternary op.  */
  rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
		 simplify_merge_mask (tenop, mask1, 0));
  ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
		 simplify_merge_mask (tenop, mask1, 1));

  /* Side effects.  */
  rtx badop0 = gen_rtx_PRE_INC (mode, op0);
  rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
  ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
  ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));

  /* Called indirectly.  */
  ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
		 simplify_rtx (nvm));
}
/* Test subregs of integer vector constant X, trying elements in
   the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
   where NELTS is the number of elements in X.  Subregs involving
   elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail.  */

static void
test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
			   unsigned int first_valid = 0)
{
  machine_mode inner_mode = GET_MODE (x);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);

  for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
    {
      machine_mode outer_mode = (machine_mode) modei;
      if (!VECTOR_MODE_P (outer_mode))
	continue;

      unsigned int outer_nunits;
      if (GET_MODE_INNER (outer_mode) == int_mode
	  && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
	  && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
	{
	  /* Test subregs in which the outer mode is a smaller,
	     constant-sized vector of the same element type.  */
	  unsigned int limit
	    = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
	  for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
	    {
	      rtx expected = NULL_RTX;
	      if (elt >= first_valid)
		{
		  rtx_vector_builder builder (outer_mode, outer_nunits, 1);
		  for (unsigned int i = 0; i < outer_nunits; ++i)
		    builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
		  expected = builder.build ();
		}
	      poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
	      ASSERT_RTX_EQ (expected,
			     simplify_subreg (outer_mode, x,
					      inner_mode, byte));
	    }
	}
      else if (known_eq (GET_MODE_SIZE (outer_mode),
			 GET_MODE_SIZE (inner_mode))
	       && known_eq (elt_bias, 0U)
	       && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
		   || known_eq (GET_MODE_BITSIZE (outer_mode),
				GET_MODE_NUNITS (outer_mode)))
	       && (!FLOAT_MODE_P (outer_mode)
		   || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
		       == GET_MODE_UNIT_PRECISION (outer_mode)))
	       && (GET_MODE_SIZE (inner_mode).is_constant ()
		   || !CONST_VECTOR_STEPPED_P (x)))
	{
	  /* Try converting to OUTER_MODE and back.  */
	  rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
	  ASSERT_TRUE (outer_x != NULL_RTX);
	  ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
					     outer_mode, 0));
	}
    }

  if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
    {
      /* Test each byte in the element range.  */
      unsigned int limit
	= constant_lower_bound (GET_MODE_SIZE (inner_mode));
      for (unsigned int i = 0; i < limit; ++i)
	{
	  unsigned int elt = i / GET_MODE_SIZE (int_mode);
	  rtx expected = NULL_RTX;
	  if (elt >= first_valid)
	    {
	      unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
	      if (BYTES_BIG_ENDIAN)
		byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
	      rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
	      wide_int shifted_elt
		= wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
	      expected = immed_wide_int_const (shifted_elt, QImode);
	    }
	  poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
	  ASSERT_RTX_EQ (expected,
			 simplify_subreg (QImode, x, inner_mode, byte));
	}
    }
}
/* Test constant subregs of integer vector mode INNER_MODE, using 1
   element per pattern.  */

static void
test_vector_subregs_repeating (machine_mode inner_mode)
{
  poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
  unsigned int min_nunits = constant_lower_bound (nunits);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  unsigned int count = gcd (min_nunits, 8);

  rtx_vector_builder builder (inner_mode, count, 1);
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (8 - i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
  if (!nunits.is_constant ())
    test_vector_subregs_modes (x, nunits - min_nunits);
}
/* Test constant subregs of integer vector mode INNER_MODE, using 2
   elements per pattern.  */

static void
test_vector_subregs_fore_back (machine_mode inner_mode)
{
  poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
  unsigned int min_nunits = constant_lower_bound (nunits);
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  unsigned int count = gcd (min_nunits, 4);

  rtx_vector_builder builder (inner_mode, count, 2);
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (i, int_mode));
  for (unsigned int i = 0; i < count; ++i)
    builder.quick_push (gen_int_mode (-(int) i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
  if (!nunits.is_constant ())
    test_vector_subregs_modes (x, nunits - min_nunits, count);
}
/* Test constant subregs of integer vector mode INNER_MODE, using 3
   elements per pattern.  */

static void
test_vector_subregs_stepped (machine_mode inner_mode)
{
  /* Build { 0, 1, 2, 3, ... }.  */
  scalar_mode int_mode = GET_MODE_INNER (inner_mode);
  rtx_vector_builder builder (inner_mode, 1, 3);
  for (unsigned int i = 0; i < 3; ++i)
    builder.quick_push (gen_int_mode (i, int_mode));
  rtx x = builder.build ();

  test_vector_subregs_modes (x);
}

/* Test constant subregs of integer vector mode INNER_MODE.  */

static void
test_vector_subregs (machine_mode inner_mode)
{
  test_vector_subregs_repeating (inner_mode);
  test_vector_subregs_fore_back (inner_mode);
  test_vector_subregs_stepped (inner_mode);
}
/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
	{
	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
	  test_vector_ops_duplicate (mode, scalar_reg);
	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
	    {
	      test_vector_ops_series (mode, scalar_reg);
	      test_vector_subregs (mode);
	    }
	  test_vec_merge (mode);
	}
    }
}
>
8016 struct simplify_const_poly_int_tests
8022 struct simplify_const_poly_int_tests
<1>
8024 static void run () {}
8027 /* Test various CONST_POLY_INT properties. */
8029 template<unsigned int N
>
8031 simplify_const_poly_int_tests
<N
>::run ()
8033 rtx x1
= gen_int_mode (poly_int64 (1, 1), QImode
);
8034 rtx x2
= gen_int_mode (poly_int64 (-80, 127), QImode
);
8035 rtx x3
= gen_int_mode (poly_int64 (-79, -128), QImode
);
8036 rtx x4
= gen_int_mode (poly_int64 (5, 4), QImode
);
8037 rtx x5
= gen_int_mode (poly_int64 (30, 24), QImode
);
8038 rtx x6
= gen_int_mode (poly_int64 (20, 16), QImode
);
8039 rtx x7
= gen_int_mode (poly_int64 (7, 4), QImode
);
8040 rtx x8
= gen_int_mode (poly_int64 (30, 24), HImode
);
8041 rtx x9
= gen_int_mode (poly_int64 (-30, -24), HImode
);
8042 rtx x10
= gen_int_mode (poly_int64 (-31, -24), HImode
);
8043 rtx two
= GEN_INT (2);
8044 rtx six
= GEN_INT (6);
8045 poly_uint64 offset
= subreg_lowpart_offset (QImode
, HImode
);
8047 /* These tests only try limited operation combinations. Fuller arithmetic
8048 testing is done directly on poly_ints. */
8049 ASSERT_EQ (simplify_unary_operation (NEG
, HImode
, x8
, HImode
), x9
);
8050 ASSERT_EQ (simplify_unary_operation (NOT
, HImode
, x8
, HImode
), x10
);
8051 ASSERT_EQ (simplify_unary_operation (TRUNCATE
, QImode
, x8
, HImode
), x5
);
8052 ASSERT_EQ (simplify_binary_operation (PLUS
, QImode
, x1
, x2
), x3
);
8053 ASSERT_EQ (simplify_binary_operation (MINUS
, QImode
, x3
, x1
), x2
);
8054 ASSERT_EQ (simplify_binary_operation (MULT
, QImode
, x4
, six
), x5
);
8055 ASSERT_EQ (simplify_binary_operation (MULT
, QImode
, six
, x4
), x5
);
8056 ASSERT_EQ (simplify_binary_operation (ASHIFT
, QImode
, x4
, two
), x6
);
8057 ASSERT_EQ (simplify_binary_operation (IOR
, QImode
, x4
, two
), x7
);
8058 ASSERT_EQ (simplify_subreg (HImode
, x5
, QImode
, 0), x8
);
8059 ASSERT_EQ (simplify_subreg (QImode
, x8
, HImode
, offset
), x5
);
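/* Note on the PLUS assertion above (illustrative): (1, 1) + (-80, 127)
   is (-79, 128), and the coefficient 128 wraps to -128 in QImode, which
   is why the expected value is X3 == (-79, -128).  */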
/* Run all of the selftests within this file.  */

void
simplify_rtx_c_tests ()
{
  test_scalar_ops ();
  test_vector_ops ();
  simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
}

} // namespace selftest

#endif /* CHECKING_P */