/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "selftest-rtl.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
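
/* Illustrative note (added commentary, not from the original source):
   given the low half of a (low, high) pair, HWI_SIGN_EXTEND yields the
   high half that sign extension would produce.  Assuming a 64-bit
   HOST_WIDE_INT:

     HWI_SIGN_EXTEND (HOST_WIDE_INT_1U << 63)  == HOST_WIDE_INT_M1
     HWI_SIGN_EXTEND (42)                      == HOST_WIDE_INT_0  */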
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (!HWI_COMPUTABLE_MODE_P (mode)
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
					   mode);
  return gen_int_mode (val, mode);
}
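
/* Added note (not from the original source): the early call to
   simplify_const_unary_operation covers the one case gen_int_mode cannot,
   namely VAL == -VAL with MODE wider than HOST_WIDE_INT (e.g. negating
   the most negative HOST_WIDE_INT in a wider-than-host mode), where the
   result needs more bits than a single CONST_INT can carry.  */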

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
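
/* Illustrative example (added, not from the original source): in SImode
   the only accepted value is the constant with just bit 31 set, so

     mode_signbit_p (SImode, gen_int_mode (HOST_WIDE_INT_1U << 31, SImode))

   is true, while any other constant yields false.  */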

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
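
/* Added example (not from the original source): because commutative
   operands are reordered before falling back to gen_rtx_fmt_ee, a call
   such as

     simplify_gen_binary (PLUS, SImode, GEN_INT (4), reg)

   that does not fold further yields (plus:SI reg (const_int 4)), with the
   constant canonically placed second; REG here stands for any SImode
   register operand.  */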

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
	return c;
      else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
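
/* Added note (not from the original source): a typical hit is an SFmode
   MEM whose address is a SYMBOL_REF into the constant pool; the function
   then returns the pooled CONST_DOUBLE itself, letting later folding see
   the real value instead of a memory load.  */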

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && VAR_P (decl)
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
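
/* Added example (not from the original source):

     simplify_gen_unary (NOT, SImode, const1_rtx, SImode)

   folds through simplify_unary_operation to (const_int -2), whereas an
   operand with no known simplification simply comes back wrapped in the
   requested code.  */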

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
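
/* Added usage sketch (not from the original source; the register number
   is made up):

     rtx folded = simplify_replace_rtx (pat, gen_rtx_REG (SImode, 100),
					const0_rtx);

   replaces every occurrence of (reg:SI 100) in PAT with (const_int 0) and
   re-simplifies the containing expressions on the way back up.  */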

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
	  /* If doing this transform works for an X with all bits set,
	     it works for any X.  */
	  && ((GET_MODE_MASK (mode) >> shift) & mask)
	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
	{
	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
	  return simplify_gen_binary (AND, mode, op0, mask_op);
	}
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    {
	      pos -= op_precision - precision;
	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					   XEXP (op, 1), GEN_INT (pos));
	    }
	}
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					 XEXP (op, 1), XEXP (op, 2));
	}
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
	return simplify_gen_unary (TRUNCATE, int_mode, inner,
				   GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
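
/* Added note (not from the original source): callers go through this
   wrapper rather than the _1 worker so that constant-pool references are
   first replaced by their constants and fully constant operands are folded
   by simplify_const_unary_operation before the structural rules run.  */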

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
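
/* Added example (not from the original source): SFmode has a 24-bit
   significand on IEEE targets, so a FLOAT of an SImode value known to have
   at most 24 significant bits (say, one masked with (const_int 0xffffff))
   is reported as exact, while a FLOAT of an arbitrary SImode value is
   not.  */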

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
	 and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return simplify_gen_relational (GE, int_mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    std::swap (in1, in2);

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
	 If comparison is not reversible use
	 x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
	{
	  rtx cond = XEXP (op, 0);
	  rtx true_rtx = XEXP (op, 1);
	  rtx false_rtx = XEXP (op, 2);

	  if ((GET_CODE (true_rtx) == NEG
	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	      || (GET_CODE (false_rtx) == NEG
		  && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
	    {
	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
		temp = reversed_comparison (cond, mode);
	      else
		{
		  temp = cond;
		  std::swap (true_rtx, false_rtx);
		}
	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
					   mode, temp, true_rtx, false_rtx);
	    }
	}

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
	{
	  int_mode = as_a <scalar_int_mode> (mode);
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	}

      if (vec_series_p (op, &base, &step))
	{
	  /* Only create a new series if we can simplify both parts.  In other
	     cases this isn't really a simplification, and it's not necessarily
	     a win to replace a vector operation with a scalar operation.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
	  if (base)
	    {
	      step = simplify_unary_operation (NEG, inner_mode,
					       step, inner_mode);
	      if (step)
		return gen_vec_series (mode, base, step);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_UNIT_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	  && (flag_unsafe_math_optimizations
	      || exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x).

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	      && exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && (num_sign_bit_copies (op, int_mode)
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_NEG (int_mode, op);
      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>).  */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent, i.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  gcc_assert (GET_MODE_BITSIZE (int_mode)
		      > GET_MODE_BITSIZE (op_mode));
	  if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   int_mode, inner, tmode);
	    }
	}

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
	 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (op, 1) != const0_rtx)
	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent, i.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, int_mode,
					   inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
				     op0_mode);
	}

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    default:
      break;
    }

  if (VECTOR_MODE_P (mode) && vec_duplicate_p (op, &elt))
    {
      /* Try applying the operator to ELT and see if that simplifies.
	 We can duplicate the result if so.

	 The reason we don't use simplify_gen_unary is that it isn't
	 necessarily a win to convert things like:

	   (neg:V (vec_duplicate:V (reg:S R)))

	 to:

	   (vec_duplicate:V (neg:S (reg:S R)))

	 The first might be done entirely in vector registers while the
	 second might need a move between register files.  */
      temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
				       elt, GET_MODE_INNER (GET_MODE (op)));
      if (temp)
	return gen_vec_duplicate (mode, temp);
    }

  return 0;
}
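
/* Added note (not from the original source): the vec_duplicate handling
   above is deliberately conservative; (neg:V (vec_duplicate:V (reg:S r)))
   is only rewritten when applying the scalar operation to the duplicated
   element actually simplifies, since trading a vector operation for a
   scalar one is not always a win.  */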

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
				rtx op, machine_mode op_mode)
{
  scalar_int_mode result_mode;

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
			(GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
	return gen_const_vec_duplicate (mode, op);
      if (GET_CODE (op) == CONST_VECTOR)
	{
	  unsigned int n_elts = GET_MODE_NUNITS (mode);
	  unsigned int in_n_elts = CONST_VECTOR_NUNITS (op);
	  gcc_assert (in_n_elts < n_elts);
	  gcc_assert ((n_elts % in_n_elts) == 0);
	  rtvec v = rtvec_alloc (n_elts);
	  for (unsigned i = 0; i < n_elts; i++)
	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_UNIT_SIZE (mode);
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      wide_int result;
      scalar_int_mode imode = (op_mode == VOIDmode
			       ? result_mode
			       : as_a <scalar_int_mode> (op_mode));
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if this check were added to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), result_mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    int_value = GET_MODE_PRECISION (imode);
	  result = wi::shwi (int_value, result_mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), result_mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    int_value = GET_MODE_PRECISION (imode);
	  result = wi::shwi (int_value, result_mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), result_mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), result_mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	default:
	  return 0;
	}

      return immed_wide_int_const (result, result_mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);

      switch (code)
	{
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  return 0;
	}
      return const_double_from_real_value (d, mode);
    }
1981 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1982 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1983 && is_int_mode (mode
, &result_mode
))
1985 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1986 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1987 operators are intentionally left unspecified (to ease implementation
1988 by target backends), for consistency, this routine implements the
1989 same semantics for constant folding as used by the middle-end. */
1991 /* This was formerly used only for non-IEEE float.
1992 eggert@twinsun.com says it is safe for IEEE also. */
1994 const REAL_VALUE_TYPE
*x
= CONST_DOUBLE_REAL_VALUE (op
);
1995 wide_int wmax
, wmin
;
1996 /* This is part of the abi to real_to_integer, but we check
1997 things before making this call. */
2003 if (REAL_VALUE_ISNAN (*x
))
2006 /* Test against the signed upper bound. */
2007 wmax
= wi::max_value (width
, SIGNED
);
2008 real_from_integer (&t
, VOIDmode
, wmax
, SIGNED
);
2009 if (real_less (&t
, x
))
2010 return immed_wide_int_const (wmax
, mode
);
2012 /* Test against the signed lower bound. */
2013 wmin
= wi::min_value (width
, SIGNED
);
2014 real_from_integer (&t
, VOIDmode
, wmin
, SIGNED
);
2015 if (real_less (x
, &t
))
2016 return immed_wide_int_const (wmin
, mode
);
2018 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
2022 if (REAL_VALUE_ISNAN (*x
) || REAL_VALUE_NEGATIVE (*x
))
2025 /* Test against the unsigned upper bound. */
2026 wmax
= wi::max_value (width
, UNSIGNED
);
2027 real_from_integer (&t
, VOIDmode
, wmax
, UNSIGNED
);
2028 if (real_less (&t
, x
))
2029 return immed_wide_int_const (wmax
, mode
);
2031 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
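/* Added illustrative note on the function above (not from the original
   source): with a 16-bit mode, (and:HI (bswap:HI x) (const_int 0x00ff))
   becomes (bswap:HI (and:HI x (const_int 0xff00))); byte-swapping the
   constant lets the bitwise operation commute with the byte swap.  */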
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
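/* Added illustrative note on the function above (not from the original
   source): linearizing to the left rewrites
   (plus (plus a b) (plus c d)) as (plus (plus (plus a b) c) d), and
   "(x op c) op y" with a constant c is canonicalized to "(x op y) op c",
   so constants bubble out to the second operand of the outermost
   operation.  */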
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation_1 that looks for cases in
   which OP0 and OP1 are both vector series or vector duplicates
   (which are really just series with a step of 0).  If so, try to
   form a new series by applying CODE to the bases and to the steps.
   Return null if no simplification is possible.

   MODE is the mode of the operation and is known to be a vector
   integer mode.  */

static rtx
simplify_binary_operation_series (rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx base0, step0;
  if (vec_duplicate_p (op0, &base0))
    step0 = const0_rtx;
  else if (!vec_series_p (op0, &base0, &step0))
    return NULL_RTX;

  rtx base1, step1;
  if (vec_duplicate_p (op1, &base1))
    step1 = const0_rtx;
  else if (!vec_series_p (op1, &base1, &step1))
    return NULL_RTX;

  /* Only create a new series if we can simplify both parts.  In other
     cases this isn't really a simplification, and it's not necessarily
     a win to replace a vector operation with a scalar operation.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
  if (!new_base)
    return NULL_RTX;

  rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
  if (!new_step)
    return NULL_RTX;

  return gen_vec_series (mode, new_base, new_step);
}
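/* Added illustrative note on the function above (not from the original
   source): adding (vec_series:V4SI (const_int 1) (const_int 2)),
   i.e. {1, 3, 5, 7}, to (vec_duplicate:V4SI (const_int 10)), a series with
   step 0, gives (vec_series:V4SI (const_int 11) (const_int 2)),
   i.e. {11, 13, 15, 17}.  */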
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright, elt0, elt1;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
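      /* Added illustrative note (not from the original source): the
	 (~a) + 1 rule is the two's-complement identity -a == ~a + 1;
	 e.g. for a == 5, ~5 == -6 and -6 + 1 == -5.  */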
      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));
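      /* Added illustrative note (not from the original source): e.g.
	 (plus:SI (symbol_ref:SI "x") (const_int 4)) is folded by
	 plus_constant into (const:SI (plus:SI (symbol_ref:SI "x")
	 (const_int 4))), a form assemblers can relocate directly.  */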
2263 /* See if this is something like X * C - X or vice versa or
2264 if the multiplication is written as a shift. If so, we can
2265 distribute and make a new multiply, shift, or maybe just
2266 have X (if C is 2 in the example above). But don't make
2267 something more expensive than we had before. */
2269 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2271 rtx lhs
= op0
, rhs
= op1
;
2273 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2274 wide_int coeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2276 if (GET_CODE (lhs
) == NEG
)
2278 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2279 lhs
= XEXP (lhs
, 0);
2281 else if (GET_CODE (lhs
) == MULT
2282 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2284 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2285 lhs
= XEXP (lhs
, 0);
2287 else if (GET_CODE (lhs
) == ASHIFT
2288 && CONST_INT_P (XEXP (lhs
, 1))
2289 && INTVAL (XEXP (lhs
, 1)) >= 0
2290 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2292 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2293 GET_MODE_PRECISION (int_mode
));
2294 lhs
= XEXP (lhs
, 0);
2297 if (GET_CODE (rhs
) == NEG
)
2299 coeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2300 rhs
= XEXP (rhs
, 0);
2302 else if (GET_CODE (rhs
) == MULT
2303 && CONST_INT_P (XEXP (rhs
, 1)))
2305 coeff1
= rtx_mode_t (XEXP (rhs
, 1), int_mode
);
2306 rhs
= XEXP (rhs
, 0);
2308 else if (GET_CODE (rhs
) == ASHIFT
2309 && CONST_INT_P (XEXP (rhs
, 1))
2310 && INTVAL (XEXP (rhs
, 1)) >= 0
2311 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2313 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2314 GET_MODE_PRECISION (int_mode
));
2315 rhs
= XEXP (rhs
, 0);
2318 if (rtx_equal_p (lhs
, rhs
))
2320 rtx orig
= gen_rtx_PLUS (int_mode
, op0
, op1
);
2322 bool speed
= optimize_function_for_speed_p (cfun
);
2324 coeff
= immed_wide_int_const (coeff0
+ coeff1
, int_mode
);
2326 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2327 return (set_src_cost (tem
, int_mode
, speed
)
2328 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
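      /* Added illustrative note (not from the original source): the block
	 above turns e.g. (plus (mult x 3) x) into (mult x 4) and
	 (plus (ashift x 2) x) into (mult x 5), keeping the result only if
	 set_src_cost says it is no more expensive than the original.  */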
2332 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2333 if (CONST_SCALAR_INT_P (op1
)
2334 && GET_CODE (op0
) == XOR
2335 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2336 && mode_signbit_p (mode
, op1
))
2337 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2338 simplify_gen_binary (XOR
, mode
, op1
,
2341 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2342 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2343 && GET_CODE (op0
) == MULT
2344 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2348 in1
= XEXP (XEXP (op0
, 0), 0);
2349 in2
= XEXP (op0
, 1);
2350 return simplify_gen_binary (MINUS
, mode
, op1
,
2351 simplify_gen_binary (MULT
, mode
,
2355 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2356 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2358 if (COMPARISON_P (op0
)
2359 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2360 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2361 && (reversed
= reversed_comparison (op0
, mode
)))
2363 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2365 /* If one of the operands is a PLUS or a MINUS, see if we can
2366 simplify this by the associative law.
2367 Don't use the associative law for floating point.
2368 The inaccuracy makes it nonassociative,
2369 and subtle programs can break if operations are associated. */
2371 if (INTEGRAL_MODE_P (mode
)
2372 && (plus_minus_operand_p (op0
)
2373 || plus_minus_operand_p (op1
))
2374 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2377 /* Reassociate floating point addition only when the user
2378 specifies associative math operations. */
2379 if (FLOAT_MODE_P (mode
)
2380 && flag_associative_math
)
2382 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2387 /* Handle vector series. */
2388 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2390 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2397 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2398 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2399 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2400 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2402 rtx xop00
= XEXP (op0
, 0);
2403 rtx xop10
= XEXP (op1
, 0);
2405 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2408 if (REG_P (xop00
) && REG_P (xop10
)
2409 && REGNO (xop00
) == REGNO (xop10
)
2410 && GET_MODE (xop00
) == mode
2411 && GET_MODE (xop10
) == mode
2412 && GET_MODE_CLASS (mode
) == MODE_CC
)
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
	 constants, in which case not retaining additions and
	 subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
	  && !contains_symbolic_reference_p (op1))
	return simplify_gen_unary (NOT, mode, op1, mode);
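      /* Added illustrative note (not from the original source): the
	 (-1 - a) == ~a rule is again a two's-complement identity;
	 e.g. for a == 5, -1 - 5 == -6 == ~5.  */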
2440 /* Subtracting 0 has no effect unless the mode has signed zeros
2441 and supports rounding towards -infinity. In such a case,
2443 if (!(HONOR_SIGNED_ZEROS (mode
)
2444 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2445 && trueop1
== CONST0_RTX (mode
))
2448 /* See if this is something like X * C - X or vice versa or
2449 if the multiplication is written as a shift. If so, we can
2450 distribute and make a new multiply, shift, or maybe just
2451 have X (if C is 2 in the example above). But don't make
2452 something more expensive than we had before. */
2454 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2456 rtx lhs
= op0
, rhs
= op1
;
2458 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2459 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2461 if (GET_CODE (lhs
) == NEG
)
2463 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2464 lhs
= XEXP (lhs
, 0);
2466 else if (GET_CODE (lhs
) == MULT
2467 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2469 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2470 lhs
= XEXP (lhs
, 0);
2472 else if (GET_CODE (lhs
) == ASHIFT
2473 && CONST_INT_P (XEXP (lhs
, 1))
2474 && INTVAL (XEXP (lhs
, 1)) >= 0
2475 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2477 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2478 GET_MODE_PRECISION (int_mode
));
2479 lhs
= XEXP (lhs
, 0);
2482 if (GET_CODE (rhs
) == NEG
)
2484 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2485 rhs
= XEXP (rhs
, 0);
2487 else if (GET_CODE (rhs
) == MULT
2488 && CONST_INT_P (XEXP (rhs
, 1)))
2490 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2491 rhs
= XEXP (rhs
, 0);
2493 else if (GET_CODE (rhs
) == ASHIFT
2494 && CONST_INT_P (XEXP (rhs
, 1))
2495 && INTVAL (XEXP (rhs
, 1)) >= 0
2496 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2498 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2499 GET_MODE_PRECISION (int_mode
));
2500 negcoeff1
= -negcoeff1
;
2501 rhs
= XEXP (rhs
, 0);
2504 if (rtx_equal_p (lhs
, rhs
))
2506 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2508 bool speed
= optimize_function_for_speed_p (cfun
);
2510 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2512 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2513 return (set_src_cost (tem
, int_mode
, speed
)
2514 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
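      /* Added illustrative note (not from the original source): this is
	 the MINUS counterpart of the PLUS distribution above, so e.g.
	 (minus (mult x 3) x) becomes (mult x 2), subject to the same
	 set_src_cost check.  */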
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));
      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
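      /* Added illustrative note (not from the original source): x - (x & y)
	 clears exactly the bits of x that are also set in y, which is what
	 x & ~y computes; e.g. x == 0b1100, y == 0b1010 gives
	 12 - 8 == 4 == 0b1100 & 0b0101.  */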
2554 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2555 by reversing the comparison code if valid. */
2556 if (STORE_FLAG_VALUE
== 1
2557 && trueop0
== const1_rtx
2558 && COMPARISON_P (op1
)
2559 && (reversed
= reversed_comparison (op1
, mode
)))
2562 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2563 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2564 && GET_CODE (op1
) == MULT
2565 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2569 in1
= XEXP (XEXP (op1
, 0), 0);
2570 in2
= XEXP (op1
, 1);
2571 return simplify_gen_binary (PLUS
, mode
,
2572 simplify_gen_binary (MULT
, mode
,
2577 /* Canonicalize (minus (neg A) (mult B C)) to
2578 (minus (mult (neg B) C) A). */
2579 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2580 && GET_CODE (op1
) == MULT
2581 && GET_CODE (op0
) == NEG
)
2585 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2586 in2
= XEXP (op1
, 1);
2587 return simplify_gen_binary (MINUS
, mode
,
2588 simplify_gen_binary (MULT
, mode
,
2593 /* If one of the operands is a PLUS or a MINUS, see if we can
2594 simplify this by the associative law. This will, for example,
2595 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2596 Don't use the associative law for floating point.
2597 The inaccuracy makes it nonassociative,
2598 and subtle programs can break if operations are associated. */
2600 if (INTEGRAL_MODE_P (mode
)
2601 && (plus_minus_operand_p (op0
)
2602 || plus_minus_operand_p (op1
))
2603 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2606 /* Handle vector series. */
2607 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
2609 tem
= simplify_binary_operation_series (code
, mode
, op0
, op1
);
2616 if (trueop1
== constm1_rtx
)
2617 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2619 if (GET_CODE (op0
) == NEG
)
2621 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2622 /* If op1 is a MULT as well and simplify_unary_operation
2623 just moved the NEG to the second operand, simplify_gen_binary
2624 below could through simplify_associative_operation move
2625 the NEG around again and recurse endlessly. */
2627 && GET_CODE (op1
) == MULT
2628 && GET_CODE (temp
) == MULT
2629 && XEXP (op1
, 0) == XEXP (temp
, 0)
2630 && GET_CODE (XEXP (temp
, 1)) == NEG
2631 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2634 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2636 if (GET_CODE (op1
) == NEG
)
2638 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2639 /* If op0 is a MULT as well and simplify_unary_operation
2640 just moved the NEG to the second operand, simplify_gen_binary
2641 below could through simplify_associative_operation move
2642 the NEG around again and recurse endlessly. */
2644 && GET_CODE (op0
) == MULT
2645 && GET_CODE (temp
) == MULT
2646 && XEXP (op0
, 0) == XEXP (temp
, 0)
2647 && GET_CODE (XEXP (temp
, 1)) == NEG
2648 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2651 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2654 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2655 x is NaN, since x * 0 is then also NaN. Nor is it valid
2656 when the mode has signed zeros, since multiplying a negative
2657 number by 0 will give -0, not 0. */
2658 if (!HONOR_NANS (mode
)
2659 && !HONOR_SIGNED_ZEROS (mode
)
2660 && trueop1
== CONST0_RTX (mode
)
2661 && ! side_effects_p (op0
))
2664 /* In IEEE floating point, x*1 is not equivalent to x for
2666 if (!HONOR_SNANS (mode
)
2667 && trueop1
== CONST1_RTX (mode
))
      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
	}
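      /* Added illustrative note (not from the original source): e.g.
	 (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3)),
	 since wi::exact_log2 (8) == 3.  */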
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	  if (real_equal (d1, &dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && real_equal (d1, &dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}
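      /* Added illustrative note (not from the original source): e.g.
	 (mult:DF x (const_double 2.0)) becomes (plus:DF x x), and
	 (mult:DF x (const_double -1.0)) becomes (neg:DF x); note that
	 decimal floating-point modes are excluded by the
	 !DECIMAL_FLOAT_MODE_P test above.  */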
2694 /* Optimize -x * -x as x * x. */
2695 if (FLOAT_MODE_P (mode
)
2696 && GET_CODE (op0
) == NEG
2697 && GET_CODE (op1
) == NEG
2698 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2699 && !side_effects_p (XEXP (op0
, 0)))
2700 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2702 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2703 if (SCALAR_FLOAT_MODE_P (mode
)
2704 && GET_CODE (op0
) == ABS
2705 && GET_CODE (op1
) == ABS
2706 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2707 && !side_effects_p (XEXP (op0
, 0)))
2708 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2710 /* Reassociate multiplication, but for floating point MULTs
2711 only when the user specifies unsafe math optimizations. */
2712 if (! FLOAT_MODE_P (mode
)
2713 || flag_unsafe_math_optimizations
)
2715 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2722 if (trueop1
== CONST0_RTX (mode
))
2724 if (INTEGRAL_MODE_P (mode
)
2725 && trueop1
== CONSTM1_RTX (mode
)
2726 && !side_effects_p (op0
))
2728 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2730 /* A | (~A) -> -1 */
2731 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2732 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2733 && ! side_effects_p (op0
)
2734 && SCALAR_INT_MODE_P (mode
))
      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
	}
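	  /* Added illustrative note (not from the original source): in
	     QImode with C1 == 0x0f and C2 == 0xf0, (C1|C2) covers the whole
	     mode mask, so (x & 0x0f) | 0xf0 simplifies to x | 0xf0.  */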
2763 /* Convert (A & B) | A to A. */
2764 if (GET_CODE (op0
) == AND
2765 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2766 || rtx_equal_p (XEXP (op0
, 1), op1
))
2767 && ! side_effects_p (XEXP (op0
, 0))
2768 && ! side_effects_p (XEXP (op0
, 1)))
2771 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2772 mode size to (rotate A CX). */
2774 if (GET_CODE (op1
) == ASHIFT
2775 || GET_CODE (op1
) == SUBREG
)
2786 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2787 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2788 && CONST_INT_P (XEXP (opleft
, 1))
2789 && CONST_INT_P (XEXP (opright
, 1))
2790 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2791 == GET_MODE_UNIT_PRECISION (mode
)))
2792 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2794 /* Same, but for ashift that has been "simplified" to a wider mode
2795 by simplify_shift_const. */
2797 if (GET_CODE (opleft
) == SUBREG
2798 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
2799 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (opleft
)),
2801 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2802 && GET_CODE (opright
) == LSHIFTRT
2803 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2804 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2805 && GET_MODE_SIZE (int_mode
) < GET_MODE_SIZE (inner_mode
)
2806 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2807 SUBREG_REG (XEXP (opright
, 0)))
2808 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2809 && CONST_INT_P (XEXP (opright
, 1))
2810 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1))
2811 + INTVAL (XEXP (opright
, 1))
2812 == GET_MODE_PRECISION (int_mode
)))
2813 return gen_rtx_ROTATE (int_mode
, XEXP (opright
, 0),
2814 XEXP (SUBREG_REG (opleft
), 1));
2816 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2817 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2818 the PLUS does not affect any of the bits in OP1: then we can do
2819 the IOR as a PLUS and we can associate. This is valid if OP1
2820 can be safely shifted left C bits. */
2821 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2822 && GET_CODE (XEXP (op0
, 0)) == PLUS
2823 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2824 && CONST_INT_P (XEXP (op0
, 1))
2825 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2827 int count
= INTVAL (XEXP (op0
, 1));
2828 HOST_WIDE_INT mask
= UINTVAL (trueop1
) << count
;
2830 if (mask
>> count
== INTVAL (trueop1
)
2831 && trunc_int_for_mode (mask
, mode
) == mask
2832 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2833 return simplify_gen_binary (ASHIFTRT
, mode
,
2834 plus_constant (mode
, XEXP (op0
, 0),
2839 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2843 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2849 if (trueop1
== CONST0_RTX (mode
))
2851 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2852 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2853 if (rtx_equal_p (trueop0
, trueop1
)
2854 && ! side_effects_p (op0
)
2855 && GET_MODE_CLASS (mode
) != MODE_CC
)
2856 return CONST0_RTX (mode
);
2858 /* Canonicalize XOR of the most significant bit to PLUS. */
2859 if (CONST_SCALAR_INT_P (op1
)
2860 && mode_signbit_p (mode
, op1
))
2861 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2862 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2863 if (CONST_SCALAR_INT_P (op1
)
2864 && GET_CODE (op0
) == PLUS
2865 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2866 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2867 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2868 simplify_gen_binary (XOR
, mode
, op1
,
      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
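      /* Added illustrative note (not from the original source): when the
	 nonzero bits are disjoint, XOR and IOR agree; e.g.
	 (xor (and x (const_int 0x0f)) (and y (const_int 0xf0))) can be
	 rewritten as the corresponding IOR, which the rotate-detection
	 patterns mentioned above recognize more easily.  */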
2880 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2881 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2884 int num_negated
= 0;
2886 if (GET_CODE (op0
) == NOT
)
2887 num_negated
++, op0
= XEXP (op0
, 0);
2888 if (GET_CODE (op1
) == NOT
)
2889 num_negated
++, op1
= XEXP (op1
, 0);
2891 if (num_negated
== 2)
2892 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2893 else if (num_negated
== 1)
2894 return simplify_gen_unary (NOT
, mode
,
2895 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2899 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2900 correspond to a machine insn or result in further simplifications
2901 if B is a constant. */
2903 if (GET_CODE (op0
) == AND
2904 && rtx_equal_p (XEXP (op0
, 1), op1
)
2905 && ! side_effects_p (op1
))
2906 return simplify_gen_binary (AND
, mode
,
2907 simplify_gen_unary (NOT
, mode
,
2908 XEXP (op0
, 0), mode
),
2911 else if (GET_CODE (op0
) == AND
2912 && rtx_equal_p (XEXP (op0
, 0), op1
)
2913 && ! side_effects_p (op1
))
2914 return simplify_gen_binary (AND
, mode
,
2915 simplify_gen_unary (NOT
, mode
,
2916 XEXP (op0
, 1), mode
),
2919 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2920 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2921 out bits inverted twice and not set by C. Similarly, given
2922 (xor (and (xor A B) C) D), simplify without inverting C in
2923 the xor operand: (xor (and A C) (B&C)^D).
2925 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2926 && GET_CODE (XEXP (op0
, 0)) == XOR
2927 && CONST_INT_P (op1
)
2928 && CONST_INT_P (XEXP (op0
, 1))
2929 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2931 enum rtx_code op
= GET_CODE (op0
);
2932 rtx a
= XEXP (XEXP (op0
, 0), 0);
2933 rtx b
= XEXP (XEXP (op0
, 0), 1);
2934 rtx c
= XEXP (op0
, 1);
2936 HOST_WIDE_INT bval
= INTVAL (b
);
2937 HOST_WIDE_INT cval
= INTVAL (c
);
2938 HOST_WIDE_INT dval
= INTVAL (d
);
2939 HOST_WIDE_INT xcval
;
2946 return simplify_gen_binary (XOR
, mode
,
2947 simplify_gen_binary (op
, mode
, a
, c
),
2948 gen_int_mode ((bval
& xcval
) ^ dval
,
2952 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2953 we can transform like this:
2954 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2955 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2956 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2957 Attempt a few simplifications when B and C are both constants. */
2958 if (GET_CODE (op0
) == AND
2959 && CONST_INT_P (op1
)
2960 && CONST_INT_P (XEXP (op0
, 1)))
2962 rtx a
= XEXP (op0
, 0);
2963 rtx b
= XEXP (op0
, 1);
2965 HOST_WIDE_INT bval
= INTVAL (b
);
2966 HOST_WIDE_INT cval
= INTVAL (c
);
2968 /* Instead of computing ~A&C, we compute its negated value,
2969 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2970 optimize for sure. If it does not simplify, we still try
2971 to compute ~A&C below, but since that always allocates
2972 RTL, we don't try that before committing to returning a
2973 simplified expression. */
2974 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
2977 if ((~cval
& bval
) == 0)
2979 rtx na_c
= NULL_RTX
;
2981 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
2984 /* If ~A does not simplify, don't bother: we don't
2985 want to simplify 2 operations into 3, and if na_c
2986 were to simplify with na, n_na_c would have
2987 simplified as well. */
2988 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
2990 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
2993 /* Try to simplify ~A&C | ~B&C. */
2994 if (na_c
!= NULL_RTX
)
2995 return simplify_gen_binary (IOR
, mode
, na_c
,
2996 gen_int_mode (~bval
& cval
, mode
));
3000 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3001 if (n_na_c
== CONSTM1_RTX (mode
))
3003 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
3004 gen_int_mode (~cval
& bval
,
3006 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
3007 gen_int_mode (~bval
& cval
,
3013 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3014 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3015 machines, and also has shorter instruction path length. */
3016 if (GET_CODE (op0
) == AND
3017 && GET_CODE (XEXP (op0
, 0)) == XOR
3018 && CONST_INT_P (XEXP (op0
, 1))
3019 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), trueop1
))
3022 rtx b
= XEXP (XEXP (op0
, 0), 1);
3023 rtx c
= XEXP (op0
, 1);
3024 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3025 rtx a_nc
= simplify_gen_binary (AND
, mode
, a
, nc
);
3026 rtx bc
= simplify_gen_binary (AND
, mode
, b
, c
);
3027 return simplify_gen_binary (IOR
, mode
, a_nc
, bc
);
3029 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3030 else if (GET_CODE (op0
) == AND
3031 && GET_CODE (XEXP (op0
, 0)) == XOR
3032 && CONST_INT_P (XEXP (op0
, 1))
3033 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), trueop1
))
3035 rtx a
= XEXP (XEXP (op0
, 0), 0);
3037 rtx c
= XEXP (op0
, 1);
3038 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
3039 rtx b_nc
= simplify_gen_binary (AND
, mode
, b
, nc
);
3040 rtx ac
= simplify_gen_binary (AND
, mode
, a
, c
);
3041 return simplify_gen_binary (IOR
, mode
, ac
, b_nc
);
3044 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3045 comparison if STORE_FLAG_VALUE is 1. */
3046 if (STORE_FLAG_VALUE
== 1
3047 && trueop1
== const1_rtx
3048 && COMPARISON_P (op0
)
3049 && (reversed
= reversed_comparison (op0
, mode
)))
3052 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3053 is (lt foo (const_int 0)), so we can perform the above
3054 simplification if STORE_FLAG_VALUE is 1. */
3056 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3057 && STORE_FLAG_VALUE
== 1
3058 && trueop1
== const1_rtx
3059 && GET_CODE (op0
) == LSHIFTRT
3060 && CONST_INT_P (XEXP (op0
, 1))
3061 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
3062 return gen_rtx_GE (int_mode
, XEXP (op0
, 0), const0_rtx
);
3064 /* (xor (comparison foo bar) (const_int sign-bit))
3065 when STORE_FLAG_VALUE is the sign bit. */
3066 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3067 && val_signbit_p (int_mode
, STORE_FLAG_VALUE
)
3068 && trueop1
== const_true_rtx
3069 && COMPARISON_P (op0
)
3070 && (reversed
= reversed_comparison (op0
, int_mode
)))
3073 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3077 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3083 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3085 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3087 if (HWI_COMPUTABLE_MODE_P (mode
))
3089 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3090 HOST_WIDE_INT nzop1
;
3091 if (CONST_INT_P (trueop1
))
3093 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3094 /* If we are turning off bits already known off in OP0, we need
3096 if ((nzop0
& ~val1
) == 0)
3099 nzop1
= nonzero_bits (trueop1
, mode
);
3100 /* If we are clearing all the nonzero bits, the result is zero. */
3101 if ((nzop1
& nzop0
) == 0
3102 && !side_effects_p (op0
) && !side_effects_p (op1
))
3103 return CONST0_RTX (mode
);
3105 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3106 && GET_MODE_CLASS (mode
) != MODE_CC
)
3109 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3110 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3111 && ! side_effects_p (op0
)
3112 && GET_MODE_CLASS (mode
) != MODE_CC
)
3113 return CONST0_RTX (mode
);
3115 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3116 there are no nonzero bits of C outside of X's mode. */
3117 if ((GET_CODE (op0
) == SIGN_EXTEND
3118 || GET_CODE (op0
) == ZERO_EXTEND
)
3119 && CONST_INT_P (trueop1
)
3120 && HWI_COMPUTABLE_MODE_P (mode
)
3121 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3122 & UINTVAL (trueop1
)) == 0)
3124 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3125 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3126 gen_int_mode (INTVAL (trueop1
),
3128 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3131 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3132 we might be able to further simplify the AND with X and potentially
3133 remove the truncation altogether. */
3134 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3136 rtx x
= XEXP (op0
, 0);
3137 machine_mode xmode
= GET_MODE (x
);
3138 tem
= simplify_gen_binary (AND
, xmode
, x
,
3139 gen_int_mode (INTVAL (trueop1
), xmode
));
3140 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3143 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3144 if (GET_CODE (op0
) == IOR
3145 && CONST_INT_P (trueop1
)
3146 && CONST_INT_P (XEXP (op0
, 1)))
3148 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3149 return simplify_gen_binary (IOR
, mode
,
3150 simplify_gen_binary (AND
, mode
,
3151 XEXP (op0
, 0), op1
),
3152 gen_int_mode (tmp
, mode
));
3155 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3156 insn (and may simplify more). */
3157 if (GET_CODE (op0
) == XOR
3158 && rtx_equal_p (XEXP (op0
, 0), op1
)
3159 && ! side_effects_p (op1
))
3160 return simplify_gen_binary (AND
, mode
,
3161 simplify_gen_unary (NOT
, mode
,
3162 XEXP (op0
, 1), mode
),
3165 if (GET_CODE (op0
) == XOR
3166 && rtx_equal_p (XEXP (op0
, 1), op1
)
3167 && ! side_effects_p (op1
))
3168 return simplify_gen_binary (AND
, mode
,
3169 simplify_gen_unary (NOT
, mode
,
3170 XEXP (op0
, 0), mode
),
3173 /* Similarly for (~(A ^ B)) & A. */
3174 if (GET_CODE (op0
) == NOT
3175 && GET_CODE (XEXP (op0
, 0)) == XOR
3176 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3177 && ! side_effects_p (op1
))
3178 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3180 if (GET_CODE (op0
) == NOT
3181 && GET_CODE (XEXP (op0
, 0)) == XOR
3182 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3183 && ! side_effects_p (op1
))
3184 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3186 /* Convert (A | B) & A to A. */
3187 if (GET_CODE (op0
) == IOR
3188 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3189 || rtx_equal_p (XEXP (op0
, 1), op1
))
3190 && ! side_effects_p (XEXP (op0
, 0))
3191 && ! side_effects_p (XEXP (op0
, 1)))
3194 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3195 ((A & N) + B) & M -> (A + B) & M
3196 Similarly if (N & M) == 0,
3197 ((A | N) + B) & M -> (A + B) & M
3198 and for - instead of + and/or ^ instead of |.
3199 Also, if (N & M) == 0, then
3200 (A +- N) & M -> A & M. */
3201 if (CONST_INT_P (trueop1
)
3202 && HWI_COMPUTABLE_MODE_P (mode
)
3203 && ~UINTVAL (trueop1
)
3204 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3205 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3210 pmop
[0] = XEXP (op0
, 0);
3211 pmop
[1] = XEXP (op0
, 1);
3213 if (CONST_INT_P (pmop
[1])
3214 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3215 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3217 for (which
= 0; which
< 2; which
++)
3220 switch (GET_CODE (tem
))
3223 if (CONST_INT_P (XEXP (tem
, 1))
3224 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3225 == UINTVAL (trueop1
))
3226 pmop
[which
] = XEXP (tem
, 0);
3230 if (CONST_INT_P (XEXP (tem
, 1))
3231 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3232 pmop
[which
] = XEXP (tem
, 0);
3239 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3241 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3243 return simplify_gen_binary (code
, mode
, tem
, op1
);
3247 /* (and X (ior (not X) Y) -> (and X Y) */
3248 if (GET_CODE (op1
) == IOR
3249 && GET_CODE (XEXP (op1
, 0)) == NOT
3250 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3251 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3253 /* (and (ior (not X) Y) X) -> (and X Y) */
3254 if (GET_CODE (op0
) == IOR
3255 && GET_CODE (XEXP (op0
, 0)) == NOT
3256 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3257 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3259 /* (and X (ior Y (not X)) -> (and X Y) */
3260 if (GET_CODE (op1
) == IOR
3261 && GET_CODE (XEXP (op1
, 1)) == NOT
3262 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3263 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3265 /* (and (ior Y (not X)) X) -> (and X Y) */
3266 if (GET_CODE (op0
) == IOR
3267 && GET_CODE (XEXP (op0
, 1)) == NOT
3268 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3269 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3271 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3275 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3281 /* 0/x is 0 (or x&0 if x has side-effects). */
3282 if (trueop0
== CONST0_RTX (mode
)
3283 && !cfun
->can_throw_non_call_exceptions
)
3285 if (side_effects_p (op1
))
3286 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3290 if (trueop1
== CONST1_RTX (mode
))
3292 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
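      /* Added illustrative note (not from the original source): e.g.
	 (udiv:SI x (const_int 8)) becomes (lshiftrt:SI x (const_int 3));
	 a logical right shift is only valid here because the division
	 is unsigned.  */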
3303 /* Handle floating point and integers separately. */
3304 if (SCALAR_FLOAT_MODE_P (mode
))
3306 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3307 safe for modes with NaNs, since 0.0 / 0.0 will then be
3308 NaN rather than 0.0. Nor is it safe for modes with signed
3309 zeros, since dividing 0 by a negative number gives -0.0 */
3310 if (trueop0
== CONST0_RTX (mode
)
3311 && !HONOR_NANS (mode
)
3312 && !HONOR_SIGNED_ZEROS (mode
)
3313 && ! side_effects_p (op1
))
3316 if (trueop1
== CONST1_RTX (mode
)
3317 && !HONOR_SNANS (mode
))
3320 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3321 && trueop1
!= CONST0_RTX (mode
))
3323 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3326 if (real_equal (d1
, &dconstm1
)
3327 && !HONOR_SNANS (mode
))
3328 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3330 /* Change FP division by a constant into multiplication.
3331 Only do this with -freciprocal-math. */
3332 if (flag_reciprocal_math
3333 && !real_equal (d1
, &dconst0
))
3336 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3337 tem
= const_double_from_real_value (d
, mode
);
3338 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3342 else if (SCALAR_INT_MODE_P (mode
))
3344 /* 0/x is 0 (or x&0 if x has side-effects). */
3345 if (trueop0
== CONST0_RTX (mode
)
3346 && !cfun
->can_throw_non_call_exceptions
)
3348 if (side_effects_p (op1
))
3349 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3353 if (trueop1
== CONST1_RTX (mode
))
3355 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3360 if (trueop1
== constm1_rtx
)
3362 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3364 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3370 /* 0%x is 0 (or x&0 if x has side-effects). */
3371 if (trueop0
== CONST0_RTX (mode
))
3373 if (side_effects_p (op1
))
3374 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
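      /* Added illustrative note (not from the original source): e.g.
	 (umod:SI x (const_int 16)) becomes (and:SI x (const_int 15)),
	 since for an unsigned power-of-two modulus the remainder is just
	 the low bits of the dividend.  */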
3392 /* 0%x is 0 (or x&0 if x has side-effects). */
3393 if (trueop0
== CONST0_RTX (mode
))
3395 if (side_effects_p (op1
))
3396 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3399 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3400 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3402 if (side_effects_p (op0
))
3403 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3404 return CONST0_RTX (mode
);
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_UNIT_PRECISION (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				    mode, op0,
				    GEN_INT (GET_MODE_UNIT_PRECISION (mode)
					     - INTVAL (trueop1)));
#endif
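      /* Added illustrative note (not from the original source): in QImode
	 (8-bit precision), (rotate x (const_int 7)) is canonicalized to
	 (rotatert x (const_int 1)), since rotating left by 7 equals
	 rotating right by 1.  */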
3426 if (trueop1
== CONST0_RTX (mode
))
3428 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3430 /* Rotating ~0 always results in ~0. */
3431 if (CONST_INT_P (trueop0
)
3432 && HWI_COMPUTABLE_MODE_P (mode
)
3433 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3434 && ! side_effects_p (op1
))
3440 scalar constants c1, c2
3441 size (M2) > size (M1)
3442 c1 == size (M2) - size (M1)
3444 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3448 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3450 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
3451 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3453 && CONST_INT_P (op1
)
3454 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3455 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
3457 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3458 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
3459 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3460 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
3461 && subreg_lowpart_p (op0
))
3463 rtx tmp
= GEN_INT (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3465 tmp
= simplify_gen_binary (code
, inner_mode
,
3466 XEXP (SUBREG_REG (op0
), 0),
3468 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
3471 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3473 val
= INTVAL (op1
) & (GET_MODE_UNIT_PRECISION (mode
) - 1);
3474 if (val
!= INTVAL (op1
))
3475 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3482 if (trueop1
== CONST0_RTX (mode
))
3484 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3486 goto canonicalize_shift
;
3489 if (trueop1
== CONST0_RTX (mode
))
3491 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3493 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3494 if (GET_CODE (op0
) == CLZ
3495 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op0
, 0)), &inner_mode
)
3496 && CONST_INT_P (trueop1
)
3497 && STORE_FLAG_VALUE
== 1
3498 && INTVAL (trueop1
) < GET_MODE_UNIT_PRECISION (mode
))
3500 unsigned HOST_WIDE_INT zero_val
= 0;
3502 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode
, zero_val
)
3503 && zero_val
== GET_MODE_PRECISION (inner_mode
)
3504 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3505 return simplify_gen_relational (EQ
, mode
, inner_mode
,
3506 XEXP (op0
, 0), const0_rtx
);
3508 goto canonicalize_shift
;
3511 if (HWI_COMPUTABLE_MODE_P (mode
)
3512 && mode_signbit_p (mode
, trueop1
)
3513 && ! side_effects_p (op0
))
3515 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3517 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3523 if (HWI_COMPUTABLE_MODE_P (mode
)
3524 && CONST_INT_P (trueop1
)
3525 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3526 && ! side_effects_p (op0
))
3528 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3530 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3536 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3538 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3540 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3546 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3548 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3550 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3563 /* ??? There are simplifications that can be done. */
3567 if (op1
== CONST0_RTX (GET_MODE_INNER (mode
)))
3568 return gen_vec_duplicate (mode
, op0
);
3572 if (!VECTOR_MODE_P (mode
))
3574 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3575 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3576 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3577 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3578 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3580 if (vec_duplicate_p (trueop0
, &elt0
))
3583 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3584 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3587 /* Extract a scalar element from a nested VEC_SELECT expression
3588 (with optional nested VEC_CONCAT expression). Some targets
3589 (i386) extract scalar element from a vector using chain of
3590 nested VEC_SELECT expressions. When input operand is a memory
3591 operand, this operation can be simplified to a simple scalar
3592 load from an offseted memory address. */
3593 if (GET_CODE (trueop0
) == VEC_SELECT
)
3595 rtx op0
= XEXP (trueop0
, 0);
3596 rtx op1
= XEXP (trueop0
, 1);
3598 int n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3600 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3606 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3607 gcc_assert (i
< n_elts
);
3609 /* Select element, pointed by nested selector. */
3610 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3612 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3613 if (GET_CODE (op0
) == VEC_CONCAT
)
3615 rtx op00
= XEXP (op0
, 0);
3616 rtx op01
= XEXP (op0
, 1);
3618 machine_mode mode00
, mode01
;
3619 int n_elts00
, n_elts01
;
3621 mode00
= GET_MODE (op00
);
3622 mode01
= GET_MODE (op01
);
3624 /* Find out number of elements of each operand. */
3625 n_elts00
= GET_MODE_NUNITS (mode00
);
3626 n_elts01
= GET_MODE_NUNITS (mode01
);
3628 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3630 /* Select correct operand of VEC_CONCAT
3631 and adjust selector. */
3632 if (elem
< n_elts01
)
3643 vec
= rtvec_alloc (1);
3644 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3646 tmp
= gen_rtx_fmt_ee (code
, mode
,
3647 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3653 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3654 gcc_assert (GET_MODE_INNER (mode
)
3655 == GET_MODE_INNER (GET_MODE (trueop0
)));
3656 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3658 if (vec_duplicate_p (trueop0
, &elt0
))
3659 /* It doesn't matter which elements are selected by trueop1,
3660 because they are all the same. */
3661 return gen_vec_duplicate (mode
, elt0
);
3663 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3665 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
3666 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3667 rtvec v
= rtvec_alloc (n_elts
);
3670 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3671 for (i
= 0; i
< n_elts
; i
++)
3673 rtx x
= XVECEXP (trueop1
, 0, i
);
3675 gcc_assert (CONST_INT_P (x
));
3676 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3680 return gen_rtx_CONST_VECTOR (mode
, v
);
3683 /* Recognize the identity. */
3684 if (GET_MODE (trueop0
) == mode
)
3686 bool maybe_ident
= true;
3687 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3689 rtx j
= XVECEXP (trueop1
, 0, i
);
3690 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3692 maybe_ident
= false;
3700 /* If we build {a,b} then permute it, build the result directly. */
3701 if (XVECLEN (trueop1
, 0) == 2
3702 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3703 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3704 && GET_CODE (trueop0
) == VEC_CONCAT
3705 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3706 && GET_MODE (XEXP (trueop0
, 0)) == mode
3707 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3708 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3710 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3711 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3714 gcc_assert (i0
< 4 && i1
< 4);
3715 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3716 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3718 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3721 if (XVECLEN (trueop1
, 0) == 2
3722 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3723 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3724 && GET_CODE (trueop0
) == VEC_CONCAT
3725 && GET_MODE (trueop0
) == mode
)
3727 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3728 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3731 gcc_assert (i0
< 2 && i1
< 2);
3732 subop0
= XEXP (trueop0
, i0
);
3733 subop1
= XEXP (trueop0
, i1
);
3735 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3738 /* If we select one half of a vec_concat, return that. */
3739 if (GET_CODE (trueop0
) == VEC_CONCAT
3740 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3742 rtx subop0
= XEXP (trueop0
, 0);
3743 rtx subop1
= XEXP (trueop0
, 1);
3744 machine_mode mode0
= GET_MODE (subop0
);
3745 machine_mode mode1
= GET_MODE (subop1
);
3746 int l0
= GET_MODE_NUNITS (mode0
);
3747 int l1
= GET_MODE_NUNITS (mode1
);
3748 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3749 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3751 bool success
= true;
3752 for (int i
= 1; i
< l0
; ++i
)
3754 rtx j
= XVECEXP (trueop1
, 0, i
);
3755 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3764 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3766 bool success
= true;
3767 for (int i
= 1; i
< l1
; ++i
)
3769 rtx j
= XVECEXP (trueop1
, 0, i
);
3770 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3782 if (XVECLEN (trueop1
, 0) == 1
3783 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3784 && GET_CODE (trueop0
) == VEC_CONCAT
)
3787 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3789 /* Try to find the element in the VEC_CONCAT. */
3790 while (GET_MODE (vec
) != mode
3791 && GET_CODE (vec
) == VEC_CONCAT
)
3793 HOST_WIDE_INT vec_size
;
3795 if (CONST_INT_P (XEXP (vec
, 0)))
3797 /* vec_concat of two const_ints doesn't make sense with
3798 respect to modes. */
3799 if (CONST_INT_P (XEXP (vec
, 1)))
3802 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3803 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3806 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3808 if (offset
< vec_size
)
3809 vec
= XEXP (vec
, 0);
3813 vec
= XEXP (vec
, 1);
3815 vec
= avoid_constant_pool_reference (vec
);
3818 if (GET_MODE (vec
) == mode
)
3822 /* If we select elements in a vec_merge that all come from the same
3823 operand, select from that operand directly. */
3824 if (GET_CODE (op0
) == VEC_MERGE
)
3826 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3827 if (CONST_INT_P (trueop02
))
3829 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3830 bool all_operand0
= true;
3831 bool all_operand1
= true;
3832 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3834 rtx j
= XVECEXP (trueop1
, 0, i
);
3835 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
3836 all_operand1
= false;
3838 all_operand0
= false;
3840 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3841 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3842 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3843 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3847 /* If we have two nested selects that are inverses of each
3848 other, replace them with the source operand. */
3849 if (GET_CODE (trueop0
) == VEC_SELECT
3850 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3852 rtx op0_subop1
= XEXP (trueop0
, 1);
3853 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3854 gcc_assert (XVECLEN (trueop1
, 0) == GET_MODE_NUNITS (mode
));
3856 /* Apply the outer ordering vector to the inner one. (The inner
3857 ordering vector is expressly permitted to be of a different
3858 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3859 then the two VEC_SELECTs cancel. */
3860 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3862 rtx x
= XVECEXP (trueop1
, 0, i
);
3863 if (!CONST_INT_P (x
))
3865 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3866 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3869 return XEXP (trueop0
, 0);
3875 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3876 ? GET_MODE (trueop0
)
3877 : GET_MODE_INNER (mode
));
3878 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3879 ? GET_MODE (trueop1
)
3880 : GET_MODE_INNER (mode
));
3882 gcc_assert (VECTOR_MODE_P (mode
));
3883 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3884 == GET_MODE_SIZE (mode
));
3886 if (VECTOR_MODE_P (op0_mode
))
3887 gcc_assert (GET_MODE_INNER (mode
)
3888 == GET_MODE_INNER (op0_mode
));
3890 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3892 if (VECTOR_MODE_P (op1_mode
))
3893 gcc_assert (GET_MODE_INNER (mode
)
3894 == GET_MODE_INNER (op1_mode
));
3896 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3898 if ((GET_CODE (trueop0
) == CONST_VECTOR
3899 || CONST_SCALAR_INT_P (trueop0
)
3900 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3901 && (GET_CODE (trueop1
) == CONST_VECTOR
3902 || CONST_SCALAR_INT_P (trueop1
)
3903 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3905 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3906 unsigned in_n_elts
= GET_MODE_NUNITS (op0_mode
);
3907 rtvec v
= rtvec_alloc (n_elts
);
3909 for (i
= 0; i
< n_elts
; i
++)
3913 if (!VECTOR_MODE_P (op0_mode
))
3914 RTVEC_ELT (v
, i
) = trueop0
;
3916 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3920 if (!VECTOR_MODE_P (op1_mode
))
3921 RTVEC_ELT (v
, i
) = trueop1
;
3923 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3928 return gen_rtx_CONST_VECTOR (mode
, v
);
3931 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3932 Restrict the transformation to avoid generating a VEC_SELECT with a
3933 mode unrelated to its operand. */
3934 if (GET_CODE (trueop0
) == VEC_SELECT
3935 && GET_CODE (trueop1
) == VEC_SELECT
3936 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3937 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3939 rtx par0
= XEXP (trueop0
, 1);
3940 rtx par1
= XEXP (trueop1
, 1);
3941 int len0
= XVECLEN (par0
, 0);
3942 int len1
= XVECLEN (par1
, 0);
3943 rtvec vec
= rtvec_alloc (len0
+ len1
);
3944 for (int i
= 0; i
< len0
; i
++)
3945 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3946 for (int i
= 0; i
< len1
; i
++)
3947 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3948 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3949 gen_rtx_PARALLEL (VOIDmode
, vec
));
3958 if (mode
== GET_MODE (op0
)
3959 && mode
== GET_MODE (op1
)
3960 && vec_duplicate_p (op0
, &elt0
)
3961 && vec_duplicate_p (op1
, &elt1
))
3963 /* Try applying the operator to ELT and see if that simplifies.
3964 We can duplicate the result if so.
3966 The reason we don't use simplify_gen_binary is that it isn't
3967 necessarily a win to convert things like:
3969 (plus:V (vec_duplicate:V (reg:S R1))
3970 (vec_duplicate:V (reg:S R2)))
3974 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
3976 The first might be done entirely in vector registers while the
3977 second might need a move between register files. */
3978 tem
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3981 return gen_vec_duplicate (mode
, tem
);
3988 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
3991 if (VECTOR_MODE_P (mode
)
3992 && code
!= VEC_CONCAT
3993 && GET_CODE (op0
) == CONST_VECTOR
3994 && GET_CODE (op1
) == CONST_VECTOR
)
3996 unsigned int n_elts
= CONST_VECTOR_NUNITS (op0
);
3997 gcc_assert (n_elts
== (unsigned int) CONST_VECTOR_NUNITS (op1
));
3998 gcc_assert (n_elts
== GET_MODE_NUNITS (mode
));
3999 rtvec v
= rtvec_alloc (n_elts
);
4002 for (i
= 0; i
< n_elts
; i
++)
4004 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
4005 CONST_VECTOR_ELT (op0
, i
),
4006 CONST_VECTOR_ELT (op1
, i
));
4009 RTVEC_ELT (v
, i
) = x
;
4012 return gen_rtx_CONST_VECTOR (mode
, v
);
4015 if (VECTOR_MODE_P (mode
)
4016 && code
== VEC_CONCAT
4017 && (CONST_SCALAR_INT_P (op0
)
4018 || GET_CODE (op0
) == CONST_FIXED
4019 || CONST_DOUBLE_AS_FLOAT_P (op0
))
4020 && (CONST_SCALAR_INT_P (op1
)
4021 || CONST_DOUBLE_AS_FLOAT_P (op1
)
4022 || GET_CODE (op1
) == CONST_FIXED
))
4024 unsigned n_elts
= GET_MODE_NUNITS (mode
);
4025 rtvec v
= rtvec_alloc (n_elts
);
4027 gcc_assert (n_elts
>= 2);
4030 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
4031 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
4033 RTVEC_ELT (v
, 0) = op0
;
4034 RTVEC_ELT (v
, 1) = op1
;
4038 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
4039 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
4042 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
4043 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
4044 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
4046 for (i
= 0; i
< op0_n_elts
; ++i
)
4047 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
4048 for (i
= 0; i
< op1_n_elts
; ++i
)
4049 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
4052 return gen_rtx_CONST_VECTOR (mode
, v
);
4055 if (SCALAR_FLOAT_MODE_P (mode
)
4056 && CONST_DOUBLE_AS_FLOAT_P (op0
)
4057 && CONST_DOUBLE_AS_FLOAT_P (op1
)
4058 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
4069 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
4071 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
4073 for (i
= 0; i
< 4; i
++)
4090 real_from_target (&r
, tmp0
, mode
);
4091 return const_double_from_real_value (r
, mode
);
4095 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4096 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4099 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4100 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4102 if (HONOR_SNANS (mode
)
4103 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4104 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4107 real_convert (&f0
, mode
, opr0
);
4108 real_convert (&f1
, mode
, opr1
);
4111 && real_equal (&f1
, &dconst0
)
4112 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4115 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4116 && flag_trapping_math
4117 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4119 int s0
= REAL_VALUE_NEGATIVE (f0
);
4120 int s1
= REAL_VALUE_NEGATIVE (f1
);
4125 /* Inf + -Inf = NaN plus exception. */
4130 /* Inf - Inf = NaN plus exception. */
4135 /* Inf / Inf = NaN plus exception. */
4142 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4143 && flag_trapping_math
4144 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4145 || (REAL_VALUE_ISINF (f1
)
4146 && real_equal (&f0
, &dconst0
))))
4147 /* Inf * 0 = NaN plus exception. */
4150 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4152 real_convert (&result
, mode
, &value
);
4154 /* Don't constant fold this floating point operation if
4155 the result has overflowed and flag_trapping_math. */
4157 if (flag_trapping_math
4158 && MODE_HAS_INFINITIES (mode
)
4159 && REAL_VALUE_ISINF (result
)
4160 && !REAL_VALUE_ISINF (f0
)
4161 && !REAL_VALUE_ISINF (f1
))
4162 /* Overflow plus exception. */
4165 /* Don't constant fold this floating point operation if the
4166 result may depend upon the run-time rounding mode and
4167 flag_rounding_math is set, or if GCC's software emulation
4168 is unable to accurately represent the result. */
4170 if ((flag_rounding_math
4171 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4172 && (inexact
|| !real_identical (&result
, &value
)))
4175 return const_double_from_real_value (result
, mode
);
4179 /* We can fold some multi-word operations. */
4180 scalar_int_mode int_mode;
4181 if (is_a <scalar_int_mode> (mode, &int_mode)
4182     && CONST_SCALAR_INT_P (op0)
4183     && CONST_SCALAR_INT_P (op1))
4187 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4188 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4190 #if TARGET_SUPPORTS_WIDE_INT == 0
4191 /* This assert keeps the simplification from producing a result
4192 that cannot be represented in a CONST_DOUBLE but a lot of
4193 upstream callers expect that this function never fails to
4194 simplify something, so if you added this to the test
4195 above, the code would die later anyway. If this assert
4196 happens, you just need to make the port support wide int. */
4197 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4202 result = wi::sub (pop0, pop1);
4206 result = wi::add (pop0, pop1);
4210 result = wi::mul (pop0, pop1);
4214 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4220 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4226 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4232 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4238 result = wi::bit_and (pop0, pop1);
4242 result = wi::bit_or (pop0, pop1);
4246 result = wi::bit_xor (pop0, pop1);
4250 result = wi::smin (pop0, pop1);
4254 result = wi::smax (pop0, pop1);
4258 result = wi::umin (pop0, pop1);
4262 result = wi::umax (pop0, pop1);
4269 wide_int wop1 = pop1;
4270 if (SHIFT_COUNT_TRUNCATED)
4271 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4272 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4278 result = wi::lrshift (pop0, wop1);
4282 result = wi::arshift (pop0, wop1);
4286 result = wi::lshift (pop0, wop1);
4297 if (wi::neg_p (pop1))
4303 result = wi::lrotate (pop0, pop1);
4307 result = wi::rrotate (pop0, pop1);
4318 return immed_wide_int_const (result, int_mode);
4326 /* Return a positive integer if X should sort after Y. The value
4327 returned is 1 if and only if X and Y are both regs. */
4330 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4334 result
= (commutative_operand_precedence (y
)
4335 - commutative_operand_precedence (x
));
4337 return result
+ result
;
4339 /* Group together equal REGs to do more simplification. */
4340 if (REG_P (x
) && REG_P (y
))
4341 return REGNO (x
) > REGNO (y
);
4346 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4347 operands may be another PLUS or MINUS.
4349 Rather than test for specific cases, we do this by a brute-force method
4350 and do all possible simplifications until no more changes occur. Then
4351 we rebuild the operation.
4353 May return NULL_RTX when no changes were made. */
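/* Illustrative example: (plus:SI (minus:SI (reg a) (reg b)) (reg b))
   is expanded into the operand list { +a, -b, +b }; the +b/-b pair
   folds to zero in the combination loop, and the rebuilt expression
   is just (reg a).  */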
4356 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4359 struct simplify_plus_minus_op_data
4366 int changed
, n_constants
, canonicalized
= 0;
4369 memset (ops
, 0, sizeof ops
);
4371 /* Set up the two operands and then expand them until nothing has been
4372 changed. If we run out of room in our array, give up; this should
4373 almost never happen. */
4378 ops
[1].neg
= (code
== MINUS
);
4385 for (i
= 0; i
< n_ops
; i
++)
4387 rtx this_op
= ops
[i
].op
;
4388 int this_neg
= ops
[i
].neg
;
4389 enum rtx_code this_code
= GET_CODE (this_op
);
4395 if (n_ops
== ARRAY_SIZE (ops
))
4398 ops
[n_ops
].op
= XEXP (this_op
, 1);
4399 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4402 ops
[i
].op
= XEXP (this_op
, 0);
4404 /* If this operand was negated then we will potentially
4405 canonicalize the expression. Similarly if we don't
4406 place the operands adjacent we're re-ordering the
4407 expression and thus might be performing a
4408 canonicalization. Ignore register re-ordering.
4409 ??? It might be better to shuffle the ops array here,
4410 but then (plus (plus (A, B), plus (C, D))) wouldn't
4411 be seen as non-canonical. */
4414 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
4419 ops
[i
].op
= XEXP (this_op
, 0);
4420 ops
[i
].neg
= ! this_neg
;
4426 if (n_ops
!= ARRAY_SIZE (ops
)
4427 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4428 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4429 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4431 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4432 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4433 ops
[n_ops
].neg
= this_neg
;
4441 /* ~a -> (-a - 1) */
4442 if (n_ops
!= ARRAY_SIZE (ops
))
4444 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4445 ops
[n_ops
++].neg
= this_neg
;
4446 ops
[i
].op
= XEXP (this_op
, 0);
4447 ops
[i
].neg
= !this_neg
;
4457 ops
[i
].op
= neg_const_int (mode
, this_op
);
4471 if (n_constants
> 1)
4474 gcc_assert (n_ops
>= 2);
4476 /* If we only have two operands, we can avoid the loops. */
4479 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4482 /* Get the two operands. Be careful with the order, especially for
4483 the cases where code == MINUS. */
4484 if (ops
[0].neg
&& ops
[1].neg
)
4486 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4489 else if (ops
[0].neg
)
4500 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4503 /* Now simplify each pair of operands until nothing changes. */
4506 /* Insertion sort is good enough for a small array. */
4507 for (i
= 1; i
< n_ops
; i
++)
4509 struct simplify_plus_minus_op_data save
;
4513 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
4516 /* Just swapping registers doesn't count as canonicalization. */
4522 ops
[j
+ 1] = ops
[j
];
4524 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
4529 for (i
= n_ops
- 1; i
> 0; i
--)
4530 for (j
= i
- 1; j
>= 0; j
--)
4532 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4533 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4535 if (lhs
!= 0 && rhs
!= 0)
4537 enum rtx_code ncode
= PLUS
;
4543 std::swap (lhs
, rhs
);
4545 else if (swap_commutative_operands_p (lhs
, rhs
))
4546 std::swap (lhs
, rhs
);
4548 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4549 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4551 rtx tem_lhs
, tem_rhs
;
4553 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4554 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4555 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
4558 if (tem
&& !CONSTANT_P (tem
))
4559 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4562 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4566 /* Reject "simplifications" that just wrap the two
4567 arguments in a CONST. Failure to do so can result
4568 in infinite recursion with simplify_binary_operation
4569 when it calls us to simplify CONST operations.
4570 Also, if we find such a simplification, don't try
4571 any more combinations with this rhs: We must have
4572 something like symbol+offset, ie. one of the
4573 trivial CONST expressions we handle later. */
4574 if (GET_CODE (tem
) == CONST
4575 && GET_CODE (XEXP (tem
, 0)) == ncode
4576 && XEXP (XEXP (tem
, 0), 0) == lhs
4577 && XEXP (XEXP (tem
, 0), 1) == rhs
)
4580 if (GET_CODE (tem
) == NEG
)
4581 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4582 if (CONST_INT_P (tem
) && lneg
)
4583 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4587 ops
[j
].op
= NULL_RTX
;
4597 /* Pack all the operands to the lower-numbered entries. */
4598 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4607 /* If nothing changed, check that rematerialization of rtl instructions
4608 is still required. */
4611 /* Perform rematerialization only if all operands are registers and
4612 all operations are PLUS. */
4613 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4614 around rs6000 and how it uses the CA register. See PR67145. */
4615 for (i
= 0; i
< n_ops
; i
++)
4617 || !REG_P (ops
[i
].op
)
4618 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
4619 && fixed_regs
[REGNO (ops
[i
].op
)]
4620 && !global_regs
[REGNO (ops
[i
].op
)]
4621 && ops
[i
].op
!= frame_pointer_rtx
4622 && ops
[i
].op
!= arg_pointer_rtx
4623 && ops
[i
].op
!= stack_pointer_rtx
))
4628 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4630 && CONST_INT_P (ops
[1].op
)
4631 && CONSTANT_P (ops
[0].op
)
4633 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4635 /* We suppressed creation of trivial CONST expressions in the
4636 combination loop to avoid recursion. Create one manually now.
4637 The combination loop should have ensured that there is exactly
4638 one CONST_INT, and the sort will have ensured that it is last
4639 in the array and that any other constant will be next-to-last. */
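/* E.g. the array might end as { (reg r), (symbol_ref s), (const_int 12) };
   plus_constant folds the last two into
   (const (plus (symbol_ref s) (const_int 12))), giving the canonical
   (plus (reg r) (const (plus (symbol_ref s) (const_int 12)))).  */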
4642 && CONST_INT_P (ops
[n_ops
- 1].op
)
4643 && CONSTANT_P (ops
[n_ops
- 2].op
))
4645 rtx value
= ops
[n_ops
- 1].op
;
4646 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4647 value
= neg_const_int (mode
, value
);
4648 if (CONST_INT_P (value
))
4650 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4656 /* Put a non-negated operand first, if possible. */
4658 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4661 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4670 /* Now make the result by performing the requested operations. */
4673 for (i
= 1; i
< n_ops
; i
++)
4674 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4675 mode
, result
, ops
[i
].op
);
4680 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4682 plus_minus_operand_p (const_rtx x
)
4684 return GET_CODE (x
) == PLUS
4685 || GET_CODE (x
) == MINUS
4686 || (GET_CODE (x
) == CONST
4687 && GET_CODE (XEXP (x
, 0)) == PLUS
4688 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4689 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
4692 /* Like simplify_binary_operation except used for relational operators.
4693 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4694 not both be VOIDmode as well.
4696 CMP_MODE specifies the mode in which the comparison is done, so it is
4697 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4698 the operands or, if both are VOIDmode, the operands are compared in
4699 "infinite precision". */
4701 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4702 machine_mode cmp_mode, rtx op0, rtx op1)
4704 rtx tem, trueop0, trueop1;
4706 if (cmp_mode == VOIDmode)
4707 cmp_mode = GET_MODE (op0);
4708 if (cmp_mode == VOIDmode)
4709 cmp_mode = GET_MODE (op1);
4711 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4714 if (SCALAR_FLOAT_MODE_P (mode
))
4716 if (tem
== const0_rtx
)
4717 return CONST0_RTX (mode
);
4718 #ifdef FLOAT_STORE_FLAG_VALUE
4720 REAL_VALUE_TYPE val
;
4721 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4722 return const_double_from_real_value (val
, mode
);
4728 if (VECTOR_MODE_P (mode
))
4730 if (tem
== const0_rtx
)
4731 return CONST0_RTX (mode
);
4732 #ifdef VECTOR_STORE_FLAG_VALUE
4734 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4735 if (val
== NULL_RTX
)
4737 if (val
== const1_rtx
)
4738 return CONST1_RTX (mode
);
4740 return gen_const_vec_duplicate (mode
, val
);
4750 /* For the following tests, ensure const0_rtx is op1. */
4751 if (swap_commutative_operands_p (op0
, op1
)
4752 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4753 std::swap (op0
, op1
), code
= swap_condition (code
);
4755 /* If op0 is a compare, extract the comparison arguments from it. */
4756 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4757 return simplify_gen_relational (code
, mode
, VOIDmode
,
4758 XEXP (op0
, 0), XEXP (op0
, 1));
4760 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4764 trueop0
= avoid_constant_pool_reference (op0
);
4765 trueop1
= avoid_constant_pool_reference (op1
);
4766 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
4770 /* This part of simplify_relational_operation is only used when CMP_MODE
4771 is not in class MODE_CC (i.e. it is a real comparison).
4773 MODE is the mode of the result, while CMP_MODE specifies the mode
4774 in which the comparison is done, so it is the mode of the operands. */
4777 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
4778 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4780 enum rtx_code op0code
= GET_CODE (op0
);
4782 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4784 /* If op0 is a comparison, extract the comparison arguments from it. */
4788 if (GET_MODE (op0
) == mode
)
4789 return simplify_rtx (op0
);
4791 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4792 XEXP (op0
, 0), XEXP (op0
, 1));
4794 else if (code
== EQ
)
4796 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
4797 if (new_code
!= UNKNOWN
)
4798 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4799 XEXP (op0
, 0), XEXP (op0
, 1));
4803 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4804 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
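/* Concretely, in SImode with C == 4:
   (ltu:SI (plus:SI a (const_int 4)) (const_int 4)) asks whether A + 4
   wrapped around, which is the same as (geu:SI a (const_int -4)).  */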
4805 if ((code
== LTU
|| code
== GEU
)
4806 && GET_CODE (op0
) == PLUS
4807 && CONST_INT_P (XEXP (op0
, 1))
4808 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4809 || rtx_equal_p (op1
, XEXP (op0
, 1)))
4810 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4811 && XEXP (op0
, 1) != const0_rtx
)
4814 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4815 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4816 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4819 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4820 transformed into (LTU a -C). */
4821 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
4822 && CONST_INT_P (XEXP (op0
, 1))
4823 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
4824 && XEXP (op0
, 1) != const0_rtx
)
4827 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4828 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
4829 XEXP (op0
, 0), new_cmp
);
4832 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4833 if ((code
== LTU
|| code
== GEU
)
4834 && GET_CODE (op0
) == PLUS
4835 && rtx_equal_p (op1
, XEXP (op0
, 1))
4836 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4837 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4838 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4839 copy_rtx (XEXP (op0
, 0)));
4841 if (op1
== const0_rtx
)
4843 /* Canonicalize (GTU x 0) as (NE x 0). */
4845 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4846 /* Canonicalize (LEU x 0) as (EQ x 0). */
4848 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4850 else if (op1
== const1_rtx
)
4855 /* Canonicalize (GE x 1) as (GT x 0). */
4856 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4859 /* Canonicalize (GEU x 1) as (NE x 0). */
4860 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4863 /* Canonicalize (LT x 1) as (LE x 0). */
4864 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4867 /* Canonicalize (LTU x 1) as (EQ x 0). */
4868 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4874 else if (op1
== constm1_rtx
)
4876 /* Canonicalize (LE x -1) as (LT x 0). */
4878 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4879 /* Canonicalize (GT x -1) as (GE x 0). */
4881 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4884 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4885 if ((code
== EQ
|| code
== NE
)
4886 && (op0code
== PLUS
|| op0code
== MINUS
)
4888 && CONSTANT_P (XEXP (op0
, 1))
4889 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4891 rtx x
= XEXP (op0
, 0);
4892 rtx c
= XEXP (op0
, 1);
4893 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4894 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
4896 /* Detect an infinite recursive condition, where we oscillate at this
4897 simplification case between:
4898 A + B == C <---> C - B == A,
4899 where A, B, and C are all constants with non-simplifiable expressions,
4900 usually SYMBOL_REFs. */
4901 if (GET_CODE (tem
) == invcode
4903 && rtx_equal_p (c
, XEXP (tem
, 1)))
4906 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
4909 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4910 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4911 scalar_int_mode int_mode
, int_cmp_mode
;
4913 && op1
== const0_rtx
4914 && is_int_mode (mode
, &int_mode
)
4915 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
4916 /* ??? Work-around BImode bugs in the ia64 backend. */
4917 && int_mode
!= BImode
4918 && int_cmp_mode
!= BImode
4919 && nonzero_bits (op0
, int_cmp_mode
) == 1
4920 && STORE_FLAG_VALUE
== 1)
4921 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
4922 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
4923 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
4925 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4926 if ((code
== EQ
|| code
== NE
)
4927 && op1
== const0_rtx
4929 return simplify_gen_relational (code
, mode
, cmp_mode
,
4930 XEXP (op0
, 0), XEXP (op0
, 1));
4932 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4933 if ((code
== EQ
|| code
== NE
)
4935 && rtx_equal_p (XEXP (op0
, 0), op1
)
4936 && !side_effects_p (XEXP (op0
, 0)))
4937 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
4940 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4941 if ((code
== EQ
|| code
== NE
)
4943 && rtx_equal_p (XEXP (op0
, 1), op1
)
4944 && !side_effects_p (XEXP (op0
, 1)))
4945 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4948 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4949 if ((code
== EQ
|| code
== NE
)
4951 && CONST_SCALAR_INT_P (op1
)
4952 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
4953 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4954 simplify_gen_binary (XOR
, cmp_mode
,
4955 XEXP (op0
, 1), op1
));
4957 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4958 can be implemented with a BICS instruction on some targets, or
4959 constant-folded if y is a constant. */
4960 if ((code
== EQ
|| code
== NE
)
4962 && rtx_equal_p (XEXP (op0
, 0), op1
)
4963 && !side_effects_p (op1
)
4964 && op1
!= CONST0_RTX (cmp_mode
))
4966 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4967 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
4969 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
4970 CONST0_RTX (cmp_mode
));
4973 /* Likewise for (eq/ne (and x y) y). */
4974 if ((code
== EQ
|| code
== NE
)
4976 && rtx_equal_p (XEXP (op0
, 1), op1
)
4977 && !side_effects_p (op1
)
4978 && op1
!= CONST0_RTX (cmp_mode
))
4980 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0), cmp_mode
);
4981 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
4983 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
4984 CONST0_RTX (cmp_mode
));
4987 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4988 if ((code
== EQ
|| code
== NE
)
4989 && GET_CODE (op0
) == BSWAP
4990 && CONST_SCALAR_INT_P (op1
))
4991 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4992 simplify_gen_unary (BSWAP
, cmp_mode
,
4995 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4996 if ((code
== EQ
|| code
== NE
)
4997 && GET_CODE (op0
) == BSWAP
4998 && GET_CODE (op1
) == BSWAP
)
4999 return simplify_gen_relational (code
, mode
, cmp_mode
,
5000 XEXP (op0
, 0), XEXP (op1
, 0));
5002 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
5008 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5009 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
5010 XEXP (op0
, 0), const0_rtx
);
5015 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5016 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
5017 XEXP (op0
, 0), const0_rtx
);
5036 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5037 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
5038 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5039 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5040 For floating-point comparisons, assume that the operands were ordered. */
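/* Worked example: when the operands compare as 5 and 7, KNOWN_RESULT is
   CMP_LT | CMP_LTU; comparison_result (GT, ...) then yields const0_rtx,
   while comparison_result (LE, ...) yields const_true_rtx.  */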
5043 comparison_result (enum rtx_code code
, int known_results
)
5049 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
5052 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
5056 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
5059 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
5063 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
5066 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
5069 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
5071 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
5074 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
5076 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
5079 return const_true_rtx
;
5087 /* Check if the given comparison (done in the given MODE) is actually
5088 a tautology or a contradiction. If the mode is VOIDmode, the
5089 comparison is done in "infinite precision". If no simplification
5090 is possible, this function returns zero. Otherwise, it returns
5091 either const_true_rtx or const0_rtx. */
5094 simplify_const_relational_operation (enum rtx_code code
,
5102 gcc_assert (mode
!= VOIDmode
5103 || (GET_MODE (op0
) == VOIDmode
5104 && GET_MODE (op1
) == VOIDmode
));
5106 /* If op0 is a compare, extract the comparison arguments from it. */
5107 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5109 op1
= XEXP (op0
, 1);
5110 op0
= XEXP (op0
, 0);
5112 if (GET_MODE (op0
) != VOIDmode
)
5113 mode
= GET_MODE (op0
);
5114 else if (GET_MODE (op1
) != VOIDmode
)
5115 mode
= GET_MODE (op1
);
5120 /* We can't simplify MODE_CC values since we don't know what the
5121 actual comparison is. */
5122 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
5125 /* Make sure the constant is second. */
5126 if (swap_commutative_operands_p (op0
, op1
))
5128 std::swap (op0
, op1
);
5129 code
= swap_condition (code
);
5132 trueop0
= avoid_constant_pool_reference (op0
);
5133 trueop1
= avoid_constant_pool_reference (op1
);
5135 /* For integer comparisons of A and B maybe we can simplify A - B and can
5136 then simplify a comparison of that with zero. If A and B are both either
5137 a register or a CONST_INT, this can't help; testing for these cases will
5138 prevent infinite recursion here and speed things up.
5140 We can only do this for EQ and NE comparisons as otherwise we may
5141 lose or introduce overflow which we cannot disregard as undefined as
5142 we do not know the signedness of the operation on either the left or
5143 the right hand side of the comparison. */
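/* For instance, comparing (plus:SI (reg x) (const_int 4)) with (reg x)
   for EQ: the MINUS of the two operands folds to (const_int 4), and the
   recursive comparison of 4 with zero gives const0_rtx.  */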
5145 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5146 && (code
== EQ
|| code
== NE
)
5147 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5148 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5149 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
5150 /* We cannot do this if tem is a nonzero address. */
5151 && ! nonzero_address_p (tem
))
5152 return simplify_const_relational_operation (signed_condition (code
),
5153 mode
, tem
, const0_rtx
);
5155 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5156 return const_true_rtx
;
5158 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
5161 /* For modes without NaNs, if the two operands are equal, we know the
5162 result except if they have side-effects. Even with NaNs we know
5163 the result of unordered comparisons and, if signaling NaNs are
5164 irrelevant, also the result of LT/GT/LTGT. */
5165 if ((! HONOR_NANS (trueop0
)
5166 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5167 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5168 && ! HONOR_SNANS (trueop0
)))
5169 && rtx_equal_p (trueop0
, trueop1
)
5170 && ! side_effects_p (trueop0
))
5171 return comparison_result (code
, CMP_EQ
);
5173 /* If the operands are floating-point constants, see if we can fold the result. */
5175 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5176 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5177 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5179 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5180 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5182 /* Comparisons are unordered iff at least one of the values is NaN. */
5183 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
5193 return const_true_rtx
;
5206 return comparison_result (code
,
5207 (real_equal (d0
, d1
) ? CMP_EQ
:
5208 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
5211 /* Otherwise, see if the operands are both integers. */
5212 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
5213 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
5215 /* It would be nice if we really had a mode here. However, the
5216 largest int representable on the target is as good as infinite. */
5218 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5219 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5220 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5222 if (wi::eq_p (ptrueop0, ptrueop1))
5223   return comparison_result (code, CMP_EQ);
5226 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5227 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5228 return comparison_result (code, cr);
5232 /* Optimize comparisons with upper and lower bounds. */
5233 scalar_int_mode int_mode
;
5234 if (CONST_INT_P (trueop1
)
5235 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5236 && HWI_COMPUTABLE_MODE_P (int_mode
)
5237 && !side_effects_p (trueop0
))
5240 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
5241 HOST_WIDE_INT val
= INTVAL (trueop1
);
5242 HOST_WIDE_INT mmin
, mmax
;
5252 /* Get a reduced range if the sign bit is zero. */
5253 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
5260 rtx mmin_rtx
, mmax_rtx
;
5261 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
5263 mmin
= INTVAL (mmin_rtx
);
5264 mmax
= INTVAL (mmax_rtx
);
5267 unsigned int sign_copies
5268 = num_sign_bit_copies (trueop0
, int_mode
);
5270 mmin
>>= (sign_copies
- 1);
5271 mmax
>>= (sign_copies
- 1);
5277 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5279 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5280 return const_true_rtx
;
5281 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5286 return const_true_rtx
;
5291 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5293 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5294 return const_true_rtx
;
5295 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5300 return const_true_rtx
;
5306 /* x == y is always false for y out of range. */
5307 if (val
< mmin
|| val
> mmax
)
5311 /* x > y is always false for y >= mmax, always true for y < mmin. */
5313 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5315 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5316 return const_true_rtx
;
5322 return const_true_rtx
;
5325 /* x < y is always false for y <= mmin, always true for y > mmax. */
5327 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5329 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5330 return const_true_rtx
;
5336 return const_true_rtx
;
5340 /* x != y is always true for y out of range. */
5341 if (val
< mmin
|| val
> mmax
)
5342 return const_true_rtx
;
5350 /* Optimize integer comparisons with zero. */
5351 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
5352 && trueop1
== const0_rtx
5353 && !side_effects_p (trueop0
))
5355 /* Some addresses are known to be nonzero. We don't know
5356 their sign, but equality comparisons are known. */
5357 if (nonzero_address_p (trueop0
))
5359 if (code
== EQ
|| code
== LEU
)
5361 if (code
== NE
|| code
== GTU
)
5362 return const_true_rtx
;
5365 /* See if the first operand is an IOR with a constant. If so, we
5366 may be able to determine the result of this comparison. */
5367 if (GET_CODE (op0
) == IOR
)
5369 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5370 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5372 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
5373 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5374 && (UINTVAL (inner_const
)
5385 return const_true_rtx
;
5389 return const_true_rtx
;
5403 /* Optimize comparison of ABS with zero. */
5404 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
5405 && (GET_CODE (trueop0
) == ABS
5406 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5407 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5412 /* Optimize abs(x) < 0.0. */
5413 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
5418 /* Optimize abs(x) >= 0.0. */
5419 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
5420 return const_true_rtx
;
5424 /* Optimize ! (abs(x) < 0.0). */
5425 return const_true_rtx
;
5435 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5436 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5437 or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
5438 can be simplified to that or NULL_RTX if not.
5439 Assume X is compared against zero with CMP_CODE and the true
5440 arm is TRUE_VAL and the false arm is FALSE_VAL. */
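/* For example, on a target whose CLZ_DEFINED_VALUE_AT_ZERO yields the mode
   precision (32 for SImode), the expression
   (if_then_else (eq x (const_int 0)) (const_int 32) (clz:SI x))
   collapses to (clz:SI x).  */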
5443 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5445 if (cmp_code != EQ && cmp_code != NE)
5448 /* Result on X == 0 and X != 0 respectively. */
5449 rtx on_zero, on_nonzero;
5453 on_nonzero
= false_val
;
5457 on_zero
= false_val
;
5458 on_nonzero
= true_val
;
5461 rtx_code op_code = GET_CODE (on_nonzero);
5462 if ((op_code != CLZ && op_code != CTZ)
5463     || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5464     || !CONST_INT_P (on_zero))
5467 HOST_WIDE_INT op_val;
5468 scalar_int_mode mode ATTRIBUTE_UNUSED
5469   = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5470 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5471      || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5472     && op_val == INTVAL (on_zero))
5479 /* Simplify CODE, an operation with result mode MODE and three operands,
5480 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5481 a constant. Return 0 if no simplification is possible. */
5484 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5485 machine_mode op0_mode
, rtx op0
, rtx op1
,
5488 bool any_change
= false;
5490 scalar_int_mode int_mode
, int_op0_mode
;
5495 /* Simplify negations around the multiplication. */
5496 /* -a * -b + c => a * b + c. */
5497 if (GET_CODE (op0
) == NEG
)
5499 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5501 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5503 else if (GET_CODE (op1
) == NEG
)
5505 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5507 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5510 /* Canonicalize the two multiplication operands. */
5511 /* a * -b + c => -b * a + c. */
5512 if (swap_commutative_operands_p (op0
, op1
))
5513 std::swap (op0
, op1
), any_change
= true;
5516 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5521 if (CONST_INT_P (op0
)
5522 && CONST_INT_P (op1
)
5523 && CONST_INT_P (op2
)
5524 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5525 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
5526 && HWI_COMPUTABLE_MODE_P (int_mode
))
5528 /* Extracting a bit-field from a constant */
5529 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5530 HOST_WIDE_INT op1val
= INTVAL (op1
);
5531 HOST_WIDE_INT op2val
= INTVAL (op2
);
5532 if (!BITS_BIG_ENDIAN
)
5534 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
5535 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
5537 /* Not enough information to calculate the bit position. */
5540 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5542 /* First zero-extend. */
5543 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
5544 /* If desired, propagate sign bit. */
5545 if (code
== SIGN_EXTRACT
5546 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
5548 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
5551 return gen_int_mode (val
, int_mode
);
5556 if (CONST_INT_P (op0
))
5557 return op0
!= const0_rtx
? op1
: op2
;
5559 /* Convert c ? a : a into "a". */
5560 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5563 /* Convert a != b ? a : b into "a". */
5564 if (GET_CODE (op0
) == NE
5565 && ! side_effects_p (op0
)
5566 && ! HONOR_NANS (mode
)
5567 && ! HONOR_SIGNED_ZEROS (mode
)
5568 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5569 && rtx_equal_p (XEXP (op0
, 1), op2
))
5570 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5571 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5574 /* Convert a == b ? a : b into "b". */
5575 if (GET_CODE (op0
) == EQ
5576 && ! side_effects_p (op0
)
5577 && ! HONOR_NANS (mode
)
5578 && ! HONOR_SIGNED_ZEROS (mode
)
5579 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5580 && rtx_equal_p (XEXP (op0
, 1), op2
))
5581 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5582 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5585 /* Convert (!c) != {0,...,0} ? a : b into
5586 c != {0,...,0} ? b : a for vector modes. */
5587 if (VECTOR_MODE_P (GET_MODE (op1
))
5588 && GET_CODE (op0
) == NE
5589 && GET_CODE (XEXP (op0
, 0)) == NOT
5590 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
5592 rtx cv
= XEXP (op0
, 1);
5593 int nunits
= CONST_VECTOR_NUNITS (cv
);
5595 for (int i
= 0; i
< nunits
; ++i
)
5596 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
5603 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
5604 XEXP (XEXP (op0
, 0), 0),
5606 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
5611 /* Convert x == 0 ? N : clz (x) into clz (x) when
5612 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5613 Similarly for ctz (x). */
5614 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
5615 && XEXP (op0
, 1) == const0_rtx
)
5618 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
5624 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5626 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5627 ? GET_MODE (XEXP (op0
, 1))
5628 : GET_MODE (XEXP (op0
, 0)));
5631 /* Look for happy constants in op1 and op2. */
5632 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5634 HOST_WIDE_INT t = INTVAL (op1);
5635 HOST_WIDE_INT f = INTVAL (op2);
5637 if (t == STORE_FLAG_VALUE && f == 0)
5638 code = GET_CODE (op0);
5639 else if (t == 0 && f == STORE_FLAG_VALUE)
5642 tmp = reversed_comparison_code (op0, NULL);
5650 return simplify_gen_relational (code
, mode
, cmp_mode
,
5651 XEXP (op0
, 0), XEXP (op0
, 1));
5654 if (cmp_mode
== VOIDmode
)
5655 cmp_mode
= op0_mode
;
5656 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5657 cmp_mode
, XEXP (op0
, 0),
5660 /* See if any simplifications were possible. */
5663 if (CONST_INT_P (temp
))
5664 return temp
== const0_rtx
? op2
: op1
;
5666 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5672 gcc_assert (GET_MODE (op0
) == mode
);
5673 gcc_assert (GET_MODE (op1
) == mode
);
5674 gcc_assert (VECTOR_MODE_P (mode
));
5675 trueop2
= avoid_constant_pool_reference (op2
);
5676 if (CONST_INT_P (trueop2
))
5678 unsigned n_elts
= GET_MODE_NUNITS (mode
);
5679 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
5680 unsigned HOST_WIDE_INT mask
;
5681 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
5684 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
5686 if (!(sel
& mask
) && !side_effects_p (op0
))
5688 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
5691 rtx trueop0
= avoid_constant_pool_reference (op0
);
5692 rtx trueop1
= avoid_constant_pool_reference (op1
);
5693 if (GET_CODE (trueop0
) == CONST_VECTOR
5694 && GET_CODE (trueop1
) == CONST_VECTOR
)
5696 rtvec v
= rtvec_alloc (n_elts
);
5699 for (i
= 0; i
< n_elts
; i
++)
5700 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
5701 ? CONST_VECTOR_ELT (trueop0
, i
)
5702 : CONST_VECTOR_ELT (trueop1
, i
));
5703 return gen_rtx_CONST_VECTOR (mode
, v
);
5706 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5707 if no element from a appears in the result. */
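/* Two-element illustration: with M == 1 the inner merge takes A only for
   element 0, and with N == 2 the outer merge keeps only element 1 of the
   inner result (which comes from B) plus element 0 of C; since N & M == 0,
   no element of A survives, so the whole expression is (vec_merge b c 2),
   assuming A has no side effects.  */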
5708 if (GET_CODE (op0
) == VEC_MERGE
)
5710 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
5711 if (CONST_INT_P (tem
))
5713 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
5714 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
5715 return simplify_gen_ternary (code
, mode
, mode
,
5716 XEXP (op0
, 1), op1
, op2
);
5717 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
5718 return simplify_gen_ternary (code
, mode
, mode
,
5719 XEXP (op0
, 0), op1
, op2
);
5722 if (GET_CODE (op1
) == VEC_MERGE
)
5724 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
5725 if (CONST_INT_P (tem
))
5727 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
5728 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
5729 return simplify_gen_ternary (code
, mode
, mode
,
5730 op0
, XEXP (op1
, 1), op2
);
5731 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
5732 return simplify_gen_ternary (code
, mode
, mode
,
5733 op0
, XEXP (op1
, 0), op2
);
5737 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5739 if (GET_CODE (op0
) == VEC_DUPLICATE
5740 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
5741 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
5742 && mode_nunits
[GET_MODE (XEXP (op0
, 0))] == 1)
5744 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
5745 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
5747 if (XEXP (XEXP (op0
, 0), 0) == op1
5748 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
5752 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5754 with (vec_concat (X) (B)) if N == 1 or
5755 (vec_concat (A) (X)) if N == 2. */
5756 if (GET_CODE (op0
) == VEC_DUPLICATE
5757 && GET_CODE (op1
) == CONST_VECTOR
5758 && CONST_VECTOR_NUNITS (op1
) == 2
5759 && GET_MODE_NUNITS (GET_MODE (op0
)) == 2
5760 && IN_RANGE (sel
, 1, 2))
5762 rtx newop0
= XEXP (op0
, 0);
5763 rtx newop1
= CONST_VECTOR_ELT (op1
, 2 - sel
);
5765 std::swap (newop0
, newop1
);
5766 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
5768 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5769 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5770 Only applies for vectors of two elements. */
5771 if (GET_CODE (op0
) == VEC_DUPLICATE
5772 && GET_CODE (op1
) == VEC_CONCAT
5773 && GET_MODE_NUNITS (GET_MODE (op0
)) == 2
5774 && GET_MODE_NUNITS (GET_MODE (op1
)) == 2
5775 && IN_RANGE (sel
, 1, 2))
5777 rtx newop0
= XEXP (op0
, 0);
5778 rtx newop1
= XEXP (op1
, 2 - sel
);
5779 rtx otherop
= XEXP (op1
, sel
- 1);
5781 std::swap (newop0
, newop1
);
5782 /* Don't want to throw away the other part of the vec_concat if
5783 it has side-effects. */
5784 if (!side_effects_p (otherop
))
5785 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
5788 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
5790 with (vec_concat x y) or (vec_concat y x) depending on value
5792 if (GET_CODE (op0
) == VEC_DUPLICATE
5793 && GET_CODE (op1
) == VEC_DUPLICATE
5794 && GET_MODE_NUNITS (GET_MODE (op0
)) == 2
5795 && GET_MODE_NUNITS (GET_MODE (op1
)) == 2
5796 && IN_RANGE (sel
, 1, 2))
5798 rtx newop0
= XEXP (op0
, 0);
5799 rtx newop1
= XEXP (op1
, 0);
5801 std::swap (newop0
, newop1
);
5803 return simplify_gen_binary (VEC_CONCAT
, mode
, newop0
, newop1
);
5807 if (rtx_equal_p (op0
, op1
)
5808 && !side_effects_p (op2
) && !side_effects_p (op1
))
5820 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5821 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5822 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5824 Works by unpacking OP into a collection of 8-bit values
5825 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5826 and then repacking them again for OUTERMODE. */
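/* Rough example: for (subreg:QI (const_int 0x1234) 0) with an HImode
   source, the value is unpacked into the byte array { 0x34, 0x12 };
   byte 0 names the least significant byte on a little-endian target,
   giving (const_int 0x34), whereas a big-endian target picks 0x12.  */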
5829 simplify_immed_subreg (fixed_size_mode outermode
, rtx op
,
5830 fixed_size_mode innermode
, unsigned int byte
)
5834 value_mask
= (1 << value_bit
) - 1
5836 unsigned char value
[MAX_BITSIZE_MODE_ANY_MODE
/ value_bit
];
5844 rtx result_s
= NULL
;
5845 rtvec result_v
= NULL
;
5846 enum mode_class outer_class
;
5847 scalar_mode outer_submode
;
5850 /* Some ports misuse CCmode. */
5851 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
5854 /* We have no way to represent a complex constant at the rtl level. */
5855 if (COMPLEX_MODE_P (outermode
))
5858 /* We support any size mode. */
5859 max_bitsize
= MAX (GET_MODE_BITSIZE (outermode
),
5860 GET_MODE_BITSIZE (innermode
));
5862 /* Unpack the value. */
5864 if (GET_CODE (op
) == CONST_VECTOR
)
5866 num_elem
= CONST_VECTOR_NUNITS (op
);
5867 elems
= &CONST_VECTOR_ELT (op
, 0);
5868 elem_bitsize
= GET_MODE_UNIT_BITSIZE (innermode
);
5874 elem_bitsize
= max_bitsize
;
5876 /* If this asserts, it is too complicated; reducing value_bit may help. */
5877 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
5878 /* I don't know how to handle endianness of sub-units. */
5879 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
5881 for (elem
= 0; elem
< num_elem
; elem
++)
5884 rtx el
= elems
[elem
];
5886 /* Vectors are kept in target memory order. (This is probably a mistake.) */
5889 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5890 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5892 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5893 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5894 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5895 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5896 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5899 switch (GET_CODE (el
))
5903 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5905 *vp
++ = INTVAL (el
) >> i
;
5906 /* CONST_INTs are always logically sign-extended. */
5907 for (; i
< elem_bitsize
; i
+= value_bit
)
5908 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
5911 case CONST_WIDE_INT
:
5913 rtx_mode_t val
= rtx_mode_t (el
, GET_MODE_INNER (innermode
));
5914 unsigned char extend
= wi::sign_mask (val
);
5915 int prec
= wi::get_precision (val
);
5917 for (i
= 0; i
< prec
&& i
< elem_bitsize
; i
+= value_bit
)
5918 *vp
++ = wi::extract_uhwi (val
, i
, value_bit
);
5919 for (; i
< elem_bitsize
; i
+= value_bit
)
5925 if (TARGET_SUPPORTS_WIDE_INT
== 0 && GET_MODE (el
) == VOIDmode
)
5927 unsigned char extend
= 0;
5928 /* If this triggers, someone should have generated a
5929 CONST_INT instead. */
5930 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
5932 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5933 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
5934 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
5937 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
5941 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
5943 for (; i
< elem_bitsize
; i
+= value_bit
)
5948 /* This is big enough for anything on the platform. */
5949 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
5950 scalar_float_mode el_mode
;
5952 el_mode
= as_a
<scalar_float_mode
> (GET_MODE (el
));
5953 int bitsize
= GET_MODE_BITSIZE (el_mode
);
5955 gcc_assert (bitsize
<= elem_bitsize
);
5956 gcc_assert (bitsize
% value_bit
== 0);
5958 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
5961 /* real_to_target produces its result in words affected by
5962 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5963 and use WORDS_BIG_ENDIAN instead; see the documentation
5964 of SUBREG in rtl.texi. */
5965 for (i
= 0; i
< bitsize
; i
+= value_bit
)
5968 if (WORDS_BIG_ENDIAN
)
5969 ibase
= bitsize
- 1 - i
;
5972 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
5975 /* It shouldn't matter what's done here, so fill it with zero. */
5977 for (; i
< elem_bitsize
; i
+= value_bit
)
5983 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5985 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5986 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5990 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5991 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5992 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
5994 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
5995 >> (i
- HOST_BITS_PER_WIDE_INT
);
5996 for (; i
< elem_bitsize
; i
+= value_bit
)
6006 /* Now, pick the right byte to start with. */
6007 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6008 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6009 will already have offset 0. */
6010 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
6012 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
6014 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6015 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6016 byte
= (subword_byte
% UNITS_PER_WORD
6017 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
6020 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6021 so if it's become negative it will instead be very large.) */
6022 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
6024 /* Convert from bytes to chunks of size value_bit. */
6025 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
6027 /* Re-pack the value. */
6028 num_elem
= GET_MODE_NUNITS (outermode
);
6030 if (VECTOR_MODE_P (outermode
))
6032 result_v
= rtvec_alloc (num_elem
);
6033 elems
= &RTVEC_ELT (result_v
, 0);
6038 outer_submode
= GET_MODE_INNER (outermode
);
6039 outer_class
= GET_MODE_CLASS (outer_submode
);
6040 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
6042 gcc_assert (elem_bitsize
% value_bit
== 0);
6043 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
6045 for (elem
= 0; elem
< num_elem
; elem
++)
6049 /* Vectors are stored in target memory order. (This is probably a mistake.) */
6052 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
6053 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
6055 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
6056 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
6057 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
6058 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
6059 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
6062 switch (outer_class
)
6065 case MODE_PARTIAL_INT
:
6070 = (GET_MODE_BITSIZE (outer_submode
) + HOST_BITS_PER_WIDE_INT
- 1)
6071 / HOST_BITS_PER_WIDE_INT
;
6072 HOST_WIDE_INT tmp
[MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_WIDE_INT
];
6075 if (GET_MODE_PRECISION (outer_submode
) > MAX_BITSIZE_MODE_ANY_INT
)
6077 for (u
= 0; u
< units
; u
++)
6079 unsigned HOST_WIDE_INT buf
= 0;
6081 i
< HOST_BITS_PER_WIDE_INT
&& base
+ i
< elem_bitsize
;
6083 buf
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
6086 base
+= HOST_BITS_PER_WIDE_INT
;
6088 r
= wide_int::from_array (tmp
, units
,
6089 GET_MODE_PRECISION (outer_submode
));
6090 #if TARGET_SUPPORTS_WIDE_INT == 0
6091 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6092 if (wi::min_precision (r
, SIGNED
) > HOST_BITS_PER_DOUBLE_INT
)
6095 elems
[elem
] = immed_wide_int_const (r
, outer_submode
);
6100 case MODE_DECIMAL_FLOAT
:
6103 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32] = { 0 };
6105 /* real_from_target wants its input in words affected by
6106 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6107 and use WORDS_BIG_ENDIAN instead; see the documentation
6108 of SUBREG in rtl.texi. */
6109 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
6112 if (WORDS_BIG_ENDIAN
)
6113 ibase
= elem_bitsize
- 1 - i
;
6116 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
6119 real_from_target (&r
, tmp
, outer_submode
);
6120 elems
[elem
] = const_double_from_real_value (r
, outer_submode
);
6132 f
.mode
= outer_submode
;
6135 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
6137 f
.data
.low
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
6138 for (; i
< elem_bitsize
; i
+= value_bit
)
6139 f
.data
.high
|= ((unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
6140 << (i
- HOST_BITS_PER_WIDE_INT
));
6142 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
6150 if (VECTOR_MODE_P (outermode
))
6151 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
6156 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6157 Return 0 if no simplifications are possible. */
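/* A couple of illustrative cases: taking the scalar lowpart of a
   vec_duplicate returns the duplicated element, e.g.
   (subreg:SI (vec_duplicate:V4SI (reg:SI x)) 0) becomes (reg:SI x);
   constant operands are folded through simplify_immed_subreg below.  */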
6159 simplify_subreg (machine_mode outermode
, rtx op
,
6160 machine_mode innermode
, unsigned int byte
)
6162 /* Little bit of sanity checking. */
6163 gcc_assert (innermode != VOIDmode);
6164 gcc_assert (outermode != VOIDmode);
6165 gcc_assert (innermode != BLKmode);
6166 gcc_assert (outermode != BLKmode);
6168 gcc_assert (GET_MODE (op) == innermode || GET_MODE (op) == VOIDmode);
6171 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6174 if (byte >= GET_MODE_SIZE (innermode))
6177 if (outermode == innermode && !byte)
6180 if (byte
% GET_MODE_UNIT_SIZE (innermode
) == 0)
6184 if (VECTOR_MODE_P (outermode
)
6185 && GET_MODE_INNER (outermode
) == GET_MODE_INNER (innermode
)
6186 && vec_duplicate_p (op
, &elt
))
6187 return gen_vec_duplicate (outermode
, elt
);
6189 if (outermode
== GET_MODE_INNER (innermode
)
6190 && vec_duplicate_p (op
, &elt
))
6194 if (CONST_SCALAR_INT_P (op
)
6195 || CONST_DOUBLE_AS_FLOAT_P (op
)
6196 || GET_CODE (op
) == CONST_FIXED
6197 || GET_CODE (op
) == CONST_VECTOR
)
6199 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6200 the result from bytes, so it only works if the sizes of the modes
6201 are known at compile time. Cases that apply to general modes
6202 should be handled here before calling simplify_immed_subreg. */
6203 fixed_size_mode fs_outermode
, fs_innermode
;
6204 if (is_a
<fixed_size_mode
> (outermode
, &fs_outermode
)
6205 && is_a
<fixed_size_mode
> (innermode
, &fs_innermode
))
6206 return simplify_immed_subreg (fs_outermode
, op
, fs_innermode
, byte
);
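
  /* For example (an illustrative case only, assuming a little-endian target
     with 2-byte HImode and 4-byte SImode), the constant path above gives

       simplify_subreg (HImode, GEN_INT (0x12345678), SImode, 0)
	 --> (const_int 0x5678)
       simplify_subreg (HImode, GEN_INT (0x12345678), SImode, 2)
	 --> (const_int 0x1234)

     since the SUBREG byte offset is interpreted as a memory offset into
     the SImode value.  */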
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
	 to the inner value of OP.  */
      HOST_WIDE_INT mem_offset = subreg_memory_offset (outermode,
						       innermode, byte);
      HOST_WIDE_INT op_mem_offset = subreg_memory_offset (op);
      HOST_WIDE_INT final_offset = mem_offset + op_mem_offset;

      /* See whether the resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  HOST_WIDE_INT required_offset
	    = subreg_memory_offset (outermode, innermostmode, 0);
	  if (final_offset != required_offset)
	    return NULL_RTX;
	  /* Paradoxical subregs always have byte offset 0.  */
	  final_offset = 0;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
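
  /* For instance (a sketch assuming a little-endian target with 4-byte
     SImode, 8-byte DImode and 16-byte TImode), a lowpart of a lowpart
     collapses into a single subreg:

       (subreg:SI (subreg:DI (reg:TI x) 0) 0)  -->  (subreg:SI (reg:TI x) 0)

     because the two byte offsets are combined into one final offset before
     the SUBREG is rebuilt.  */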
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
				      subreg_memory_offset (outermode,
							    innermode, byte));

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
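
  /* For example (illustrative only, assuming a 4-byte SImode and an
     addressable, non-volatile MEM), a narrowing subreg of a memory
     reference such as

       (subreg:SI (mem:DI addr) 4)

     is rewritten by adjust_address_nv into an SImode MEM whose address is
     offset by 4 bytes; no endianness correction is needed because the
     SUBREG byte offset is already a memory offset.  */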
  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      part_size = GET_MODE_SIZE (part_mode);
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
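
  /* For a complex value this simply picks out one of the two parts; e.g.
     assuming an 8-byte DFmode,

       (subreg:DF (concat:DC (reg:DF re) (reg:DF im)) 0)  -->  (reg:DF re)
       (subreg:DF (concat:DC (reg:DF re) (reg:DF im)) 8)  -->  (reg:DF im)

     because each part is part_size bytes wide and the byte offset selects
     the part directly.  */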
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source provides.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
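
  /* For example, on a little-endian target with 4-byte SImode and 8-byte
     DImode (an illustrative assumption), the high half of a zero extension
     is known to be zero:

       (subreg:SI (zero_extend:DI (reg:SI x)) 4)  -->  (const_int 0)

     since the subreg's least significant bit (bit 32) lies entirely above
     the 32 bits supplied by the ZERO_EXTEND source.  */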
  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && (GET_MODE_PRECISION (int_outermode)
	  < GET_MODE_PRECISION (int_innermode))
      && byte == subreg_lowpart_offset (int_outermode, int_innermode))
    {
      rtx tem = simplify_truncation (int_outermode, op, int_innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
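
/* The truncation case above catches, for instance, the lowpart subreg of
   an extension (a sketch assuming a little-endian target with 1-byte
   QImode and 4-byte SImode):

     (subreg:QI (zero_extend:SI (reg:QI x)) 0)

   is routed through simplify_truncation and folds back to (reg:QI x).  */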
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Generate a SUBREG that extracts the least significant part of EXPR
   (which has mode INNER_MODE) in mode OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
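
/* For example (with the common 4-byte SImode / 8-byte DImode layout
   assumed only for illustration),

     lowpart_subreg (SImode, x, DImode)

   uses byte offset 0 on a little-endian target and byte offset 4 on a
   big-endian one, so for a pseudo register it typically yields
   (subreg:SI (reg:DI x) 0) or (subreg:SI (reg:DI x) 4) respectively.  */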
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   that do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}
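
/* A small illustration of the dispatch above (X here is assumed to be a
   pseudo register of SImode):

     simplify_rtx (gen_rtx_PLUS (SImode, x, const0_rtx))

   falls through the RTX_COMM_ARITH/RTX_BIN_ARITH arms and returns X
   itself, while an expression with no known simplification comes back
   as NULL.  */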
#if CHECKING_P

namespace selftest {

/* Make a unique pseudo REG of mode MODE for use by selftests.  */

static rtx
make_test_reg (machine_mode mode)
{
  static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;

  return gen_rtx_REG (mode, test_reg_num++);
}
/* Test vector simplifications involving VEC_DUPLICATE in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
{
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  unsigned int nunits = GET_MODE_NUNITS (mode);
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
    {
      /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
      rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
      rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NOT, mode,
					       duplicate_not, mode));

      rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
      rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
      ASSERT_RTX_EQ (duplicate,
		     simplify_unary_operation (NEG, mode,
					       duplicate_neg, mode));

      /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (PLUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_EQ (duplicate,
		     simplify_binary_operation (MINUS, mode, duplicate,
						CONST0_RTX (mode)));

      ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
			 simplify_binary_operation (MINUS, mode, duplicate,
						    duplicate));
    }

  /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
  rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
  ASSERT_RTX_PTR_EQ (scalar_reg,
		     simplify_binary_operation (VEC_SELECT, inner_mode,
						duplicate, zero_par));

  /* And again with the final element.  */
  rtx last_index = gen_int_mode (GET_MODE_NUNITS (mode) - 1, word_mode);
  rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
  ASSERT_RTX_PTR_EQ (scalar_reg,
		     simplify_binary_operation (VEC_SELECT, inner_mode,
						duplicate, last_par));

  /* Test a scalar subreg of a VEC_DUPLICATE.  */
  unsigned int offset = subreg_lowpart_offset (inner_mode, mode);
  ASSERT_RTX_EQ (scalar_reg,
		 simplify_gen_subreg (inner_mode, duplicate,
				      mode, offset));

  machine_mode narrower_mode;
  if (nunits > 2
      && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
      && VECTOR_MODE_P (narrower_mode))
    {
      /* Test VEC_SELECT of a vector.  */
      rtx vec_par
	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
      rtx narrower_duplicate
	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_binary_operation (VEC_SELECT, narrower_mode,
						duplicate, vec_par));

      /* Test a vector subreg of a VEC_DUPLICATE.  */
      unsigned int offset = subreg_lowpart_offset (narrower_mode, mode);
      ASSERT_RTX_EQ (narrower_duplicate,
		     simplify_gen_subreg (narrower_mode, duplicate,
					  mode, offset));
    }
}
/* Test vector simplifications involving VEC_SERIES in which the
   operands and result have vector mode MODE.  SCALAR_REG is a pseudo
   register that holds one element of MODE.  */

static void
test_vector_ops_series (machine_mode mode, rtx scalar_reg)
{
  /* Test unary cases with VEC_SERIES arguments.  */
  scalar_mode inner_mode = GET_MODE_INNER (mode);
  rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
  rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
  rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
  rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
  rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
  rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
  rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
  rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
					 neg_scalar_reg);
  ASSERT_RTX_EQ (series_0_r,
		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
  ASSERT_RTX_EQ (series_r_r,
		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));

  /* Test that a VEC_SERIES with a zero step is simplified away.  */
  ASSERT_RTX_EQ (duplicate,
		 simplify_binary_operation (VEC_SERIES, mode,
					    scalar_reg, const0_rtx));

  /* Test PLUS and MINUS with VEC_SERIES.  */
  rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
  rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
  rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
  ASSERT_RTX_EQ (series_r_r,
		 simplify_binary_operation (PLUS, mode, series_0_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (PLUS, mode, duplicate,
					    series_0_m1));
  ASSERT_RTX_EQ (series_0_r,
		 simplify_binary_operation (MINUS, mode, series_r_r,
					    duplicate));
  ASSERT_RTX_EQ (series_r_m1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_1));
  ASSERT_RTX_EQ (series_r_1,
		 simplify_binary_operation (MINUS, mode, duplicate,
					    series_0_m1));
}
/* Verify some simplifications involving vectors.  */

static void
test_vector_ops ()
{
  for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
    {
      machine_mode mode = (machine_mode) i;
      if (VECTOR_MODE_P (mode))
	{
	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
	  test_vector_ops_duplicate (mode, scalar_reg);
	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	      && GET_MODE_NUNITS (mode) > 2)
	    test_vector_ops_series (mode, scalar_reg);
	}
    }
}
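
/* A minimal additional sketch along the same lines (illustrative only and
   deliberately not wired into simplify_rtx_c_tests below; the scalar
   modes used here are assumptions made purely for the example).  It
   exercises two trivial scalar subreg folds from simplify_subreg and
   lowpart_subreg above.  */

static void ATTRIBUTE_UNUSED
test_scalar_subreg_folds_sketch ()
{
  /* A subreg that changes neither the mode nor the offset folds to the
     operand itself.  */
  rtx reg = make_test_reg (SImode);
  ASSERT_RTX_PTR_EQ (reg, simplify_subreg (SImode, reg, SImode, 0));

  /* The lowpart of a constant is computed directly and is independent of
     endianness.  */
  ASSERT_RTX_EQ (gen_int_mode (0x34, QImode),
		 lowpart_subreg (QImode, gen_int_mode (0x1234, HImode),
				 HImode));
}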
/* Run all of the selftests within this file.  */

void
simplify_rtx_c_tests ()
{
  test_vector_ops ();
}

} // namespace selftest

#endif /* CHECKING_P */