/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
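/* For example, HWI_SIGN_EXTEND (-1) yields HOST_WIDE_INT_M1 (all bits
   set) as the high half, while HWI_SIGN_EXTEND (5) yields
   HOST_WIDE_INT_0, exactly as if LOW had been sign extended.  */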
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
                                           mode);
  return gen_int_mode (val, mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
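/* As an illustration, for SImode the only value accepted here is the
   constant with just bit 31 set, i.e. (const_int -2147483648); any
   other bit pattern fails the final equality test.  */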
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
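/* The three val_signbit_* predicates above differ only in their final
   test: equality with the sign bit, sign bit known set, or sign bit
   known clear.  For QImode (width 8) the relevant mask is simply 0x80.  */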
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
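/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   normally folds straight to X, while
   simplify_gen_binary (PLUS, SImode, const1_rtx, x) is reordered so
   that the constant ends up second before the PLUS rtx is built.  */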
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
        return c;
      else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}
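/* For instance, a reference such as (mem:SF (symbol_ref ".LC0")) whose
   pool entry holds 1.0 is returned as (const_double:SF 1.0), giving
   callers a constant they can fold further.  */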
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && VAR_P (decl)
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
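/* In effect, a MEM whose MEM_EXPR identifies a static or thread-local
   variable at a known offset is rewritten in terms of the variable's
   own DECL_RTL address, undoing address legitimization (e.g. for -fpic)
   for passes that want to see the underlying symbol again.  */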
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
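/* Callers use these simplify_gen_* wrappers instead of gen_rtx_*
   directly: when a fold exists the folded rtx is returned, e.g.
   simplify_gen_relational (EQ, SImode, SImode, x, x) can collapse to a
   constant instead of building (eq:SI x x).  */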
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
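/* For example, simplify_replace_rtx on (plus:SI (reg:SI r) (reg:SI r))
   with OLD_RTX = (reg:SI r) and NEW_RTX = (const_int 1) does not stop
   at textual substitution; the parents are rebuilt through
   simplify_gen_binary, so the result is (const_int 2).  */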
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (const_int Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
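/* In other words, when the code below breaks OP apart it always wraps
   the pieces in simplify_gen_unary (TRUNCATE, ...); X above becomes
   (truncate:SI (reg:DI X)) rather than (subreg:SI (reg:DI X) 0),
   because the subreg form would assert a proof about X that has not
   been made.  */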
static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
      && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
          || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
          /* If doing this transform works for an X with all bits set,
             it works for any X.  */
          && ((GET_MODE_MASK (mode) >> shift) & mask)
             == ((GET_MODE_MASK (op_mode) >> shift) & mask)
          && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
          && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
        {
          mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
          return simplify_gen_binary (AND, mode, op0, mask_op);
        }
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            {
              pos -= op_precision - precision;
              return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                           XEXP (op, 1), GEN_INT (pos));
            }
        }
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                         XEXP (op, 1), XEXP (op, 2));
        }
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
        return simplify_gen_unary (TRUNCATE, int_mode, inner,
                                   GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
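/* A concrete case of the final IOR rule: for QImode any mask whose low
   eight bits are all ones already determines the result, so
   (truncate:QI (ior:SI X (const_int 255))) folds to (const_int -1).  */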
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
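/* For instance, (float:DF (reg:SI x)) is always exact, since DFmode has
   a 53-bit significand, while (float:SF (reg:SI x)) is only considered
   exact when nonzero_bits or num_sign_bit_copies prove that x fits in
   SFmode's 24 significand bits.  */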
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
         modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
         and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && is_a <scalar_int_mode> (mode, &int_mode)
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
        return simplify_gen_relational (GE, int_mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If comparison is not reversible use
         x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
               || (GET_CODE (false_rtx) == NEG
                   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
        {
          int_mode = as_a <scalar_int_mode> (mode);
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (int_mode == inner)
                return temp;
              if (GET_MODE_PRECISION (int_mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (int_mode == inner)
                return temp;
              if (GET_MODE_PRECISION (int_mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                                  0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
         */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
          && (num_sign_bit_copies (op, int_mode)
              == GET_MODE_PRECISION (int_mode)))
        return gen_rtx_NEG (int_mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematiclly
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          gcc_assert (GET_MODE_BITSIZE (int_mode)
                      > GET_MODE_BITSIZE (op_mode));
          if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           int_mode, inner, tmode);
            }
        }

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (op, 1) != const0_rtx)
        return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematiclly
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, int_mode,
                                           inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
          && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), op0_mode)
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
                                     op0_mode);
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    default:
      break;
    }

  return 0;
}
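/* As one concrete example of the folds above, with STORE_FLAG_VALUE
   being 1 the NEG case turns (neg:SI (lt:SI (reg:SI x) (const_int 0)))
   into (ashiftrt:SI (reg:SI x) (const_int 31)), an arithmetic shift
   that broadcasts the sign bit of x.  */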
1687 /* Try to compute the value of a unary operation CODE whose output mode is to
1688 be MODE with input operand OP whose mode was originally OP_MODE.
1689 Return zero if the value cannot be computed. */
1691 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1692 rtx op
, machine_mode op_mode
)
1694 scalar_int_mode result_mode
;
1696 if (code
== VEC_DUPLICATE
)
1698 gcc_assert (VECTOR_MODE_P (mode
));
1699 if (GET_MODE (op
) != VOIDmode
)
1701 if (!VECTOR_MODE_P (GET_MODE (op
)))
1702 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1704 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1707 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
)
1708 || GET_CODE (op
) == CONST_VECTOR
)
1710 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
1711 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1712 rtvec v
= rtvec_alloc (n_elts
);
1715 if (GET_CODE (op
) != CONST_VECTOR
)
1716 for (i
= 0; i
< n_elts
; i
++)
1717 RTVEC_ELT (v
, i
) = op
;
1720 machine_mode inmode
= GET_MODE (op
);
1721 int in_elt_size
= GET_MODE_UNIT_SIZE (inmode
);
1722 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1724 gcc_assert (in_n_elts
< n_elts
);
1725 gcc_assert ((n_elts
% in_n_elts
) == 0);
1726 for (i
= 0; i
< n_elts
; i
++)
1727 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1729 return gen_rtx_CONST_VECTOR (mode
, v
);
1733 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1735 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
1736 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1737 machine_mode opmode
= GET_MODE (op
);
1738 int op_elt_size
= GET_MODE_UNIT_SIZE (opmode
);
1739 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1740 rtvec v
= rtvec_alloc (n_elts
);
1743 gcc_assert (op_n_elts
== n_elts
);
1744 for (i
= 0; i
< n_elts
; i
++)
1746 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1747 CONST_VECTOR_ELT (op
, i
),
1748 GET_MODE_INNER (opmode
));
1751 RTVEC_ELT (v
, i
) = x
;
1753 return gen_rtx_CONST_VECTOR (mode
, v
);
1756 /* The order of these tests is critical so that, for example, we don't
1757 check the wrong mode (input vs. output) for a conversion operation,
1758 such as FIX. At some point, this should be simplified. */
1760 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1764 if (op_mode
== VOIDmode
)
1766 /* CONST_INT have VOIDmode as the mode. We assume that all
1767 the bits of the constant are significant, though, this is
1768 a dangerous assumption as many times CONST_INTs are
1769 created and used with garbage in the bits outside of the
1770 precision of the implied mode of the const_int. */
1771 op_mode
= MAX_MODE_INT
;
1774 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), SIGNED
);
1776 /* Avoid the folding if flag_signaling_nans is on and
1777 operand is a signaling NaN. */
1778 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1781 d
= real_value_truncate (mode
, d
);
1782 return const_double_from_real_value (d
, mode
);
1784 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1788 if (op_mode
== VOIDmode
)
1790 /* CONST_INT have VOIDmode as the mode. We assume that all
1791 the bits of the constant are significant, though, this is
1792 a dangerous assumption as many times CONST_INTs are
1793 created and used with garbage in the bits outside of the
1794 precision of the implied mode of the const_int. */
1795 op_mode
= MAX_MODE_INT
;
1798 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), UNSIGNED
);
1800 /* Avoid the folding if flag_signaling_nans is on and
1801 operand is a signaling NaN. */
1802 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1805 d
= real_value_truncate (mode
, d
);
1806 return const_double_from_real_value (d
, mode
);
1809 if (CONST_SCALAR_INT_P (op
) && is_a
<scalar_int_mode
> (mode
, &result_mode
))
1811 unsigned int width
= GET_MODE_PRECISION (result_mode
);
1813 scalar_int_mode imode
= (op_mode
== VOIDmode
1815 : as_a
<scalar_int_mode
> (op_mode
));
1816 rtx_mode_t op0
= rtx_mode_t (op
, imode
);
1819 #if TARGET_SUPPORTS_WIDE_INT == 0
1820 /* This assert keeps the simplification from producing a result
1821 that cannot be represented in a CONST_DOUBLE but a lot of
1822 upstream callers expect that this function never fails to
1823 simplify something and so you if you added this to the test
1824 above the code would die later anyway. If this assert
1825 happens, you just need to make the port support wide int. */
1826 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1832 result
= wi::bit_not (op0
);
1836 result
= wi::neg (op0
);
1840 result
= wi::abs (op0
);
1844 result
= wi::shwi (wi::ffs (op0
), result_mode
);
1848 if (wi::ne_p (op0
, 0))
1849 int_value
= wi::clz (op0
);
1850 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1851 int_value
= GET_MODE_PRECISION (imode
);
1852 result
= wi::shwi (int_value
, result_mode
);
1856 result
= wi::shwi (wi::clrsb (op0
), result_mode
);
1860 if (wi::ne_p (op0
, 0))
1861 int_value
= wi::ctz (op0
);
1862 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode
, int_value
))
1863 int_value
= GET_MODE_PRECISION (imode
);
1864 result
= wi::shwi (int_value
, result_mode
);
1868 result
= wi::shwi (wi::popcount (op0
), result_mode
);
1872 result
= wi::shwi (wi::parity (op0
), result_mode
);
1876 result
= wide_int (op0
).bswap ();
1881 result
= wide_int::from (op0
, width
, UNSIGNED
);
1885 result
= wide_int::from (op0
, width
, SIGNED
);
1893 return immed_wide_int_const (result
, result_mode
);
1896 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1897 && SCALAR_FLOAT_MODE_P (mode
)
1898 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1900 REAL_VALUE_TYPE d
= *CONST_DOUBLE_REAL_VALUE (op
);
1906 d
= real_value_abs (&d
);
1909 d
= real_value_negate (&d
);
1911 case FLOAT_TRUNCATE
:
1912 /* Don't perform the operation if flag_signaling_nans is on
1913 and the operand is a signaling NaN. */
1914 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1916 d
= real_value_truncate (mode
, d
);
1919 /* Don't perform the operation if flag_signaling_nans is on
1920 and the operand is a signaling NaN. */
1921 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1923 /* All this does is change the mode, unless changing
1925 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1926 real_convert (&d
, mode
, &d
);
1929 /* Don't perform the operation if flag_signaling_nans is on
1930 and the operand is a signaling NaN. */
1931 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1933 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1940 real_to_target (tmp
, &d
, GET_MODE (op
));
1941 for (i
= 0; i
< 4; i
++)
1943 real_from_target (&d
, tmp
, mode
);
1949 return const_double_from_real_value (d
, mode
);
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the ABI to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }

  return 0;
}
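/* For example, the saturating behaviour above folds
   (fix:QI (const_double 300.0)) to (const_int 127), the largest signed
   QImode value, and (unsigned_fix:QI (const_double -3.0)) to
   (const_int 0).  */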
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
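/* For example, with SImode operands the first rule above rewrites
   (and (bswap x) (const_int 0xff)) as
   (bswap (and x (const_int 0xff000000))), because the constant can be
   byte-swapped at compile time.  */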
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
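/* For example, called with code == MULT on (mult (mult x (const_int 2))
   (const_int 3)), the "(a op b) op c" -> "a op (b op c)" attempt above
   folds the two constants and the result is (mult x (const_int 6)).  */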
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
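/* For example, a call such as simplify_binary_operation (PLUS, SImode,
   const1_rtx, reg) first swaps the operands so the constant comes second,
   then tries constant folding and finally the pattern-based rules in
   simplify_binary_operation_1.  */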
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);
  scalar_int_mode int_mode, inner_mode;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (int_mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);

	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
	      return (set_src_cost (tem, int_mode, speed)
		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
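      /* For example, the distribution rule above sees
	 (plus (mult x (const_int 3)) x) as the same operand with
	 coefficients 3 and 1, so it becomes (mult x (const_int 4))
	 provided the new form is no more expensive than the old one.  */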
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
	    return xop00;

	  if (REG_P (xop00) && REG_P (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE (xop00) == mode
	      && GET_MODE (xop10) == mode
	      && GET_MODE_CLASS (mode) == MODE_CC)
	    return xop00;
	}
      break;

    case MINUS:
2341 /* We can't assume x-x is 0 even with non-IEEE floating point,
2342 but since it is zero except in very strange circumstances, we
2343 will treat it as zero with -ffinite-math-only. */
2344 if (rtx_equal_p (trueop0
, trueop1
)
2345 && ! side_effects_p (op0
)
2346 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2347 return CONST0_RTX (mode
);
2349 /* Change subtraction from zero into negation. (0 - x) is the
2350 same as -x when x is NaN, infinite, or finite and nonzero.
2351 But if the mode has signed zeros, and does not round towards
2352 -infinity, then 0 - 0 is 0, not -0. */
2353 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2354 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2356 /* (-1 - a) is ~a, unless the expression contains symbolic
2357 constants, in which case not retaining additions and
2358 subtractions could cause invalid assembly to be produced. */
2359 if (trueop0
== constm1_rtx
2360 && !contains_symbolic_reference_p (op1
))
2361 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2363 /* Subtracting 0 has no effect unless the mode has signed zeros
2364 and supports rounding towards -infinity. In such a case,
2366 if (!(HONOR_SIGNED_ZEROS (mode
)
2367 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2368 && trueop1
== CONST0_RTX (mode
))
2371 /* See if this is something like X * C - X or vice versa or
2372 if the multiplication is written as a shift. If so, we can
2373 distribute and make a new multiply, shift, or maybe just
2374 have X (if C is 2 in the example above). But don't make
2375 something more expensive than we had before. */
2377 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
2379 rtx lhs
= op0
, rhs
= op1
;
2381 wide_int coeff0
= wi::one (GET_MODE_PRECISION (int_mode
));
2382 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2384 if (GET_CODE (lhs
) == NEG
)
2386 coeff0
= wi::minus_one (GET_MODE_PRECISION (int_mode
));
2387 lhs
= XEXP (lhs
, 0);
2389 else if (GET_CODE (lhs
) == MULT
2390 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2392 coeff0
= rtx_mode_t (XEXP (lhs
, 1), int_mode
);
2393 lhs
= XEXP (lhs
, 0);
2395 else if (GET_CODE (lhs
) == ASHIFT
2396 && CONST_INT_P (XEXP (lhs
, 1))
2397 && INTVAL (XEXP (lhs
, 1)) >= 0
2398 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2400 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2401 GET_MODE_PRECISION (int_mode
));
2402 lhs
= XEXP (lhs
, 0);
2405 if (GET_CODE (rhs
) == NEG
)
2407 negcoeff1
= wi::one (GET_MODE_PRECISION (int_mode
));
2408 rhs
= XEXP (rhs
, 0);
2410 else if (GET_CODE (rhs
) == MULT
2411 && CONST_INT_P (XEXP (rhs
, 1)))
2413 negcoeff1
= wi::neg (rtx_mode_t (XEXP (rhs
, 1), int_mode
));
2414 rhs
= XEXP (rhs
, 0);
2416 else if (GET_CODE (rhs
) == ASHIFT
2417 && CONST_INT_P (XEXP (rhs
, 1))
2418 && INTVAL (XEXP (rhs
, 1)) >= 0
2419 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (int_mode
))
2421 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2422 GET_MODE_PRECISION (int_mode
));
2423 negcoeff1
= -negcoeff1
;
2424 rhs
= XEXP (rhs
, 0);
2427 if (rtx_equal_p (lhs
, rhs
))
2429 rtx orig
= gen_rtx_MINUS (int_mode
, op0
, op1
);
2431 bool speed
= optimize_function_for_speed_p (cfun
);
2433 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, int_mode
);
2435 tem
= simplify_gen_binary (MULT
, int_mode
, lhs
, coeff
);
2436 return (set_src_cost (tem
, int_mode
, speed
)
2437 <= set_src_cost (orig
, int_mode
, speed
) ? tem
: 0);
2441 /* (a - (-b)) -> (a + b). True even for IEEE. */
2442 if (GET_CODE (op1
) == NEG
)
2443 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2445 /* (-x - c) may be simplified as (-c - x). */
2446 if (GET_CODE (op0
) == NEG
2447 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2449 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2451 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2454 /* Don't let a relocatable value get a negative coeff. */
2455 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2456 return simplify_gen_binary (PLUS
, mode
,
2458 neg_const_int (mode
, op1
));
2460 /* (x - (x & y)) -> (x & ~y) */
2461 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2463 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2465 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2466 GET_MODE (XEXP (op1
, 1)));
2467 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2469 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2471 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2472 GET_MODE (XEXP (op1
, 0)));
2473 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2477 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2478 by reversing the comparison code if valid. */
2479 if (STORE_FLAG_VALUE
== 1
2480 && trueop0
== const1_rtx
2481 && COMPARISON_P (op1
)
2482 && (reversed
= reversed_comparison (op1
, mode
)))
2485 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2486 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2487 && GET_CODE (op1
) == MULT
2488 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2492 in1
= XEXP (XEXP (op1
, 0), 0);
2493 in2
= XEXP (op1
, 1);
2494 return simplify_gen_binary (PLUS
, mode
,
2495 simplify_gen_binary (MULT
, mode
,
2500 /* Canonicalize (minus (neg A) (mult B C)) to
2501 (minus (mult (neg B) C) A). */
2502 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2503 && GET_CODE (op1
) == MULT
2504 && GET_CODE (op0
) == NEG
)
2508 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2509 in2
= XEXP (op1
, 1);
2510 return simplify_gen_binary (MINUS
, mode
,
2511 simplify_gen_binary (MULT
, mode
,
2516 /* If one of the operands is a PLUS or a MINUS, see if we can
2517 simplify this by the associative law. This will, for example,
2518 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2519 Don't use the associative law for floating point.
2520 The inaccuracy makes it nonassociative,
2521 and subtle programs can break if operations are associated. */
2523 if (INTEGRAL_MODE_P (mode
)
2524 && (plus_minus_operand_p (op0
)
2525 || plus_minus_operand_p (op1
))
2526 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2531 if (trueop1
== constm1_rtx
)
2532 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2534 if (GET_CODE (op0
) == NEG
)
2536 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2537 /* If op1 is a MULT as well and simplify_unary_operation
2538 just moved the NEG to the second operand, simplify_gen_binary
2539 below could through simplify_associative_operation move
2540 the NEG around again and recurse endlessly. */
2542 && GET_CODE (op1
) == MULT
2543 && GET_CODE (temp
) == MULT
2544 && XEXP (op1
, 0) == XEXP (temp
, 0)
2545 && GET_CODE (XEXP (temp
, 1)) == NEG
2546 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2549 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2551 if (GET_CODE (op1
) == NEG
)
2553 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2554 /* If op0 is a MULT as well and simplify_unary_operation
2555 just moved the NEG to the second operand, simplify_gen_binary
2556 below could through simplify_associative_operation move
2557 the NEG around again and recurse endlessly. */
2559 && GET_CODE (op0
) == MULT
2560 && GET_CODE (temp
) == MULT
2561 && XEXP (op0
, 0) == XEXP (temp
, 0)
2562 && GET_CODE (XEXP (temp
, 1)) == NEG
2563 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2566 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2569 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2570 x is NaN, since x * 0 is then also NaN. Nor is it valid
2571 when the mode has signed zeros, since multiplying a negative
2572 number by 0 will give -0, not 0. */
2573 if (!HONOR_NANS (mode
)
2574 && !HONOR_SIGNED_ZEROS (mode
)
2575 && trueop1
== CONST0_RTX (mode
)
2576 && ! side_effects_p (op0
))
2579 /* In IEEE floating point, x*1 is not equivalent to x for
2581 if (!HONOR_SNANS (mode
)
2582 && trueop1
== CONST1_RTX (mode
))
2585 /* Convert multiply by constant power of two into shift. */
2586 if (CONST_SCALAR_INT_P (trueop1
))
2588 val
= wi::exact_log2 (rtx_mode_t (trueop1
, mode
));
2590 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2593 /* x*2 is x+x and x*(-1) is -x */
2594 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2595 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2596 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2597 && GET_MODE (op0
) == mode
)
2599 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
2601 if (real_equal (d1
, &dconst2
))
2602 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2604 if (!HONOR_SNANS (mode
)
2605 && real_equal (d1
, &dconstm1
))
2606 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2609 /* Optimize -x * -x as x * x. */
2610 if (FLOAT_MODE_P (mode
)
2611 && GET_CODE (op0
) == NEG
2612 && GET_CODE (op1
) == NEG
2613 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2614 && !side_effects_p (XEXP (op0
, 0)))
2615 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2617 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2618 if (SCALAR_FLOAT_MODE_P (mode
)
2619 && GET_CODE (op0
) == ABS
2620 && GET_CODE (op1
) == ABS
2621 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2622 && !side_effects_p (XEXP (op0
, 0)))
2623 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2625 /* Reassociate multiplication, but for floating point MULTs
2626 only when the user specifies unsafe math optimizations. */
2627 if (! FLOAT_MODE_P (mode
)
2628 || flag_unsafe_math_optimizations
)
2630 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2637 if (trueop1
== CONST0_RTX (mode
))
2639 if (INTEGRAL_MODE_P (mode
)
2640 && trueop1
== CONSTM1_RTX (mode
)
2641 && !side_effects_p (op0
))
2643 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2645 /* A | (~A) -> -1 */
2646 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2647 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2648 && ! side_effects_p (op0
)
2649 && SCALAR_INT_MODE_P (mode
))
2652 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2653 if (CONST_INT_P (op1
)
2654 && HWI_COMPUTABLE_MODE_P (mode
)
2655 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2656 && !side_effects_p (op0
))
2659 /* Canonicalize (X & C1) | C2. */
2660 if (GET_CODE (op0
) == AND
2661 && CONST_INT_P (trueop1
)
2662 && CONST_INT_P (XEXP (op0
, 1)))
2664 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2665 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2666 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2668 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2670 && !side_effects_p (XEXP (op0
, 0)))
2673 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2674 if (((c1
|c2
) & mask
) == mask
)
2675 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2677 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2678 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2680 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2681 gen_int_mode (c1
& ~c2
, mode
));
2682 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2686 /* Convert (A & B) | A to A. */
2687 if (GET_CODE (op0
) == AND
2688 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2689 || rtx_equal_p (XEXP (op0
, 1), op1
))
2690 && ! side_effects_p (XEXP (op0
, 0))
2691 && ! side_effects_p (XEXP (op0
, 1)))
2694 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2695 mode size to (rotate A CX). */
2697 if (GET_CODE (op1
) == ASHIFT
2698 || GET_CODE (op1
) == SUBREG
)
2709 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2710 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2711 && CONST_INT_P (XEXP (opleft
, 1))
2712 && CONST_INT_P (XEXP (opright
, 1))
2713 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2714 == GET_MODE_PRECISION (mode
)))
2715 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2717 /* Same, but for ashift that has been "simplified" to a wider mode
2718 by simplify_shift_const. */
2720 if (GET_CODE (opleft
) == SUBREG
2721 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
2722 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (opleft
)),
2724 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2725 && GET_CODE (opright
) == LSHIFTRT
2726 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2727 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2728 && GET_MODE_SIZE (int_mode
) < GET_MODE_SIZE (inner_mode
)
2729 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2730 SUBREG_REG (XEXP (opright
, 0)))
2731 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2732 && CONST_INT_P (XEXP (opright
, 1))
2733 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1))
2734 + INTVAL (XEXP (opright
, 1))
2735 == GET_MODE_PRECISION (int_mode
)))
2736 return gen_rtx_ROTATE (int_mode
, XEXP (opright
, 0),
2737 XEXP (SUBREG_REG (opleft
), 1));
2739 /* If we have (ior (and (X C1) C2)), simplify this by making
2740 C1 as small as possible if C1 actually changes. */
2741 if (CONST_INT_P (op1
)
2742 && (HWI_COMPUTABLE_MODE_P (mode
)
2743 || INTVAL (op1
) > 0)
2744 && GET_CODE (op0
) == AND
2745 && CONST_INT_P (XEXP (op0
, 1))
2746 && CONST_INT_P (op1
)
2747 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2749 rtx tmp
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2750 gen_int_mode (UINTVAL (XEXP (op0
, 1))
2753 return simplify_gen_binary (IOR
, mode
, tmp
, op1
);
2756 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2757 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2758 the PLUS does not affect any of the bits in OP1: then we can do
2759 the IOR as a PLUS and we can associate. This is valid if OP1
2760 can be safely shifted left C bits. */
2761 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2762 && GET_CODE (XEXP (op0
, 0)) == PLUS
2763 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2764 && CONST_INT_P (XEXP (op0
, 1))
2765 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2767 int count
= INTVAL (XEXP (op0
, 1));
2768 HOST_WIDE_INT mask
= UINTVAL (trueop1
) << count
;
2770 if (mask
>> count
== INTVAL (trueop1
)
2771 && trunc_int_for_mode (mask
, mode
) == mask
2772 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2773 return simplify_gen_binary (ASHIFTRT
, mode
,
2774 plus_constant (mode
, XEXP (op0
, 0),
2779 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2783 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2789 if (trueop1
== CONST0_RTX (mode
))
2791 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2792 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2793 if (rtx_equal_p (trueop0
, trueop1
)
2794 && ! side_effects_p (op0
)
2795 && GET_MODE_CLASS (mode
) != MODE_CC
)
2796 return CONST0_RTX (mode
);
2798 /* Canonicalize XOR of the most significant bit to PLUS. */
2799 if (CONST_SCALAR_INT_P (op1
)
2800 && mode_signbit_p (mode
, op1
))
2801 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2802 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2803 if (CONST_SCALAR_INT_P (op1
)
2804 && GET_CODE (op0
) == PLUS
2805 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2806 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2807 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2808 simplify_gen_binary (XOR
, mode
, op1
,
2811 /* If we are XORing two things that have no bits in common,
2812 convert them into an IOR. This helps to detect rotation encoded
2813 using those methods and possibly other simplifications. */
2815 if (HWI_COMPUTABLE_MODE_P (mode
)
2816 && (nonzero_bits (op0
, mode
)
2817 & nonzero_bits (op1
, mode
)) == 0)
2818 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2820 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2821 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2824 int num_negated
= 0;
2826 if (GET_CODE (op0
) == NOT
)
2827 num_negated
++, op0
= XEXP (op0
, 0);
2828 if (GET_CODE (op1
) == NOT
)
2829 num_negated
++, op1
= XEXP (op1
, 0);
2831 if (num_negated
== 2)
2832 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2833 else if (num_negated
== 1)
2834 return simplify_gen_unary (NOT
, mode
,
2835 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2839 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2840 correspond to a machine insn or result in further simplifications
2841 if B is a constant. */
2843 if (GET_CODE (op0
) == AND
2844 && rtx_equal_p (XEXP (op0
, 1), op1
)
2845 && ! side_effects_p (op1
))
2846 return simplify_gen_binary (AND
, mode
,
2847 simplify_gen_unary (NOT
, mode
,
2848 XEXP (op0
, 0), mode
),
2851 else if (GET_CODE (op0
) == AND
2852 && rtx_equal_p (XEXP (op0
, 0), op1
)
2853 && ! side_effects_p (op1
))
2854 return simplify_gen_binary (AND
, mode
,
2855 simplify_gen_unary (NOT
, mode
,
2856 XEXP (op0
, 1), mode
),
2859 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2860 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2861 out bits inverted twice and not set by C. Similarly, given
2862 (xor (and (xor A B) C) D), simplify without inverting C in
2863 the xor operand: (xor (and A C) (B&C)^D).
2865 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2866 && GET_CODE (XEXP (op0
, 0)) == XOR
2867 && CONST_INT_P (op1
)
2868 && CONST_INT_P (XEXP (op0
, 1))
2869 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2871 enum rtx_code op
= GET_CODE (op0
);
2872 rtx a
= XEXP (XEXP (op0
, 0), 0);
2873 rtx b
= XEXP (XEXP (op0
, 0), 1);
2874 rtx c
= XEXP (op0
, 1);
2876 HOST_WIDE_INT bval
= INTVAL (b
);
2877 HOST_WIDE_INT cval
= INTVAL (c
);
2878 HOST_WIDE_INT dval
= INTVAL (d
);
2879 HOST_WIDE_INT xcval
;
2886 return simplify_gen_binary (XOR
, mode
,
2887 simplify_gen_binary (op
, mode
, a
, c
),
2888 gen_int_mode ((bval
& xcval
) ^ dval
,
2892 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2893 we can transform like this:
2894 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2895 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2896 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2897 Attempt a few simplifications when B and C are both constants. */
2898 if (GET_CODE (op0
) == AND
2899 && CONST_INT_P (op1
)
2900 && CONST_INT_P (XEXP (op0
, 1)))
2902 rtx a
= XEXP (op0
, 0);
2903 rtx b
= XEXP (op0
, 1);
2905 HOST_WIDE_INT bval
= INTVAL (b
);
2906 HOST_WIDE_INT cval
= INTVAL (c
);
2908 /* Instead of computing ~A&C, we compute its negated value,
2909 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2910 optimize for sure. If it does not simplify, we still try
2911 to compute ~A&C below, but since that always allocates
2912 RTL, we don't try that before committing to returning a
2913 simplified expression. */
2914 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
2917 if ((~cval
& bval
) == 0)
2919 rtx na_c
= NULL_RTX
;
2921 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
2924 /* If ~A does not simplify, don't bother: we don't
2925 want to simplify 2 operations into 3, and if na_c
2926 were to simplify with na, n_na_c would have
2927 simplified as well. */
2928 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
2930 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
2933 /* Try to simplify ~A&C | ~B&C. */
2934 if (na_c
!= NULL_RTX
)
2935 return simplify_gen_binary (IOR
, mode
, na_c
,
2936 gen_int_mode (~bval
& cval
, mode
));
2940 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2941 if (n_na_c
== CONSTM1_RTX (mode
))
2943 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2944 gen_int_mode (~cval
& bval
,
2946 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2947 gen_int_mode (~bval
& cval
,
2953 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
2954 do (ior (and A ~C) (and B C)) which is a machine instruction on some
2955 machines, and also has shorter instruction path length. */
2956 if (GET_CODE (op0
) == AND
2957 && GET_CODE (XEXP (op0
, 0)) == XOR
2958 && CONST_INT_P (XEXP (op0
, 1))
2959 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), trueop1
))
2962 rtx b
= XEXP (XEXP (op0
, 0), 1);
2963 rtx c
= XEXP (op0
, 1);
2964 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
2965 rtx a_nc
= simplify_gen_binary (AND
, mode
, a
, nc
);
2966 rtx bc
= simplify_gen_binary (AND
, mode
, b
, c
);
2967 return simplify_gen_binary (IOR
, mode
, a_nc
, bc
);
2969 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2970 else if (GET_CODE (op0
) == AND
2971 && GET_CODE (XEXP (op0
, 0)) == XOR
2972 && CONST_INT_P (XEXP (op0
, 1))
2973 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), trueop1
))
2975 rtx a
= XEXP (XEXP (op0
, 0), 0);
2977 rtx c
= XEXP (op0
, 1);
2978 rtx nc
= simplify_gen_unary (NOT
, mode
, c
, mode
);
2979 rtx b_nc
= simplify_gen_binary (AND
, mode
, b
, nc
);
2980 rtx ac
= simplify_gen_binary (AND
, mode
, a
, c
);
2981 return simplify_gen_binary (IOR
, mode
, ac
, b_nc
);
2984 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2985 comparison if STORE_FLAG_VALUE is 1. */
2986 if (STORE_FLAG_VALUE
== 1
2987 && trueop1
== const1_rtx
2988 && COMPARISON_P (op0
)
2989 && (reversed
= reversed_comparison (op0
, mode
)))
2992 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2993 is (lt foo (const_int 0)), so we can perform the above
2994 simplification if STORE_FLAG_VALUE is 1. */
2996 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
2997 && STORE_FLAG_VALUE
== 1
2998 && trueop1
== const1_rtx
2999 && GET_CODE (op0
) == LSHIFTRT
3000 && CONST_INT_P (XEXP (op0
, 1))
3001 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (int_mode
) - 1)
3002 return gen_rtx_GE (int_mode
, XEXP (op0
, 0), const0_rtx
);
3004 /* (xor (comparison foo bar) (const_int sign-bit))
3005 when STORE_FLAG_VALUE is the sign bit. */
3006 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
3007 && val_signbit_p (int_mode
, STORE_FLAG_VALUE
)
3008 && trueop1
== const_true_rtx
3009 && COMPARISON_P (op0
)
3010 && (reversed
= reversed_comparison (op0
, int_mode
)))
3013 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3017 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3023 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3025 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3027 if (HWI_COMPUTABLE_MODE_P (mode
))
3029 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3030 HOST_WIDE_INT nzop1
;
3031 if (CONST_INT_P (trueop1
))
3033 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3034 /* If we are turning off bits already known off in OP0, we need
3036 if ((nzop0
& ~val1
) == 0)
3039 nzop1
= nonzero_bits (trueop1
, mode
);
3040 /* If we are clearing all the nonzero bits, the result is zero. */
3041 if ((nzop1
& nzop0
) == 0
3042 && !side_effects_p (op0
) && !side_effects_p (op1
))
3043 return CONST0_RTX (mode
);
3045 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3046 && GET_MODE_CLASS (mode
) != MODE_CC
)
3049 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3050 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3051 && ! side_effects_p (op0
)
3052 && GET_MODE_CLASS (mode
) != MODE_CC
)
3053 return CONST0_RTX (mode
);
3055 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3056 there are no nonzero bits of C outside of X's mode. */
3057 if ((GET_CODE (op0
) == SIGN_EXTEND
3058 || GET_CODE (op0
) == ZERO_EXTEND
)
3059 && CONST_INT_P (trueop1
)
3060 && HWI_COMPUTABLE_MODE_P (mode
)
3061 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3062 & UINTVAL (trueop1
)) == 0)
3064 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3065 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3066 gen_int_mode (INTVAL (trueop1
),
3068 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3071 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3072 we might be able to further simplify the AND with X and potentially
3073 remove the truncation altogether. */
3074 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3076 rtx x
= XEXP (op0
, 0);
3077 machine_mode xmode
= GET_MODE (x
);
3078 tem
= simplify_gen_binary (AND
, xmode
, x
,
3079 gen_int_mode (INTVAL (trueop1
), xmode
));
3080 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3083 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3084 if (GET_CODE (op0
) == IOR
3085 && CONST_INT_P (trueop1
)
3086 && CONST_INT_P (XEXP (op0
, 1)))
3088 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3089 return simplify_gen_binary (IOR
, mode
,
3090 simplify_gen_binary (AND
, mode
,
3091 XEXP (op0
, 0), op1
),
3092 gen_int_mode (tmp
, mode
));
3095 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3096 insn (and may simplify more). */
3097 if (GET_CODE (op0
) == XOR
3098 && rtx_equal_p (XEXP (op0
, 0), op1
)
3099 && ! side_effects_p (op1
))
3100 return simplify_gen_binary (AND
, mode
,
3101 simplify_gen_unary (NOT
, mode
,
3102 XEXP (op0
, 1), mode
),
3105 if (GET_CODE (op0
) == XOR
3106 && rtx_equal_p (XEXP (op0
, 1), op1
)
3107 && ! side_effects_p (op1
))
3108 return simplify_gen_binary (AND
, mode
,
3109 simplify_gen_unary (NOT
, mode
,
3110 XEXP (op0
, 0), mode
),
3113 /* Similarly for (~(A ^ B)) & A. */
3114 if (GET_CODE (op0
) == NOT
3115 && GET_CODE (XEXP (op0
, 0)) == XOR
3116 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3117 && ! side_effects_p (op1
))
3118 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3120 if (GET_CODE (op0
) == NOT
3121 && GET_CODE (XEXP (op0
, 0)) == XOR
3122 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3123 && ! side_effects_p (op1
))
3124 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3126 /* Convert (A | B) & A to A. */
3127 if (GET_CODE (op0
) == IOR
3128 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3129 || rtx_equal_p (XEXP (op0
, 1), op1
))
3130 && ! side_effects_p (XEXP (op0
, 0))
3131 && ! side_effects_p (XEXP (op0
, 1)))
3134 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3135 ((A & N) + B) & M -> (A + B) & M
3136 Similarly if (N & M) == 0,
3137 ((A | N) + B) & M -> (A + B) & M
3138 and for - instead of + and/or ^ instead of |.
3139 Also, if (N & M) == 0, then
3140 (A +- N) & M -> A & M. */
3141 if (CONST_INT_P (trueop1
)
3142 && HWI_COMPUTABLE_MODE_P (mode
)
3143 && ~UINTVAL (trueop1
)
3144 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3145 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3150 pmop
[0] = XEXP (op0
, 0);
3151 pmop
[1] = XEXP (op0
, 1);
3153 if (CONST_INT_P (pmop
[1])
3154 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3155 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3157 for (which
= 0; which
< 2; which
++)
3160 switch (GET_CODE (tem
))
3163 if (CONST_INT_P (XEXP (tem
, 1))
3164 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3165 == UINTVAL (trueop1
))
3166 pmop
[which
] = XEXP (tem
, 0);
3170 if (CONST_INT_P (XEXP (tem
, 1))
3171 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3172 pmop
[which
] = XEXP (tem
, 0);
3179 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3181 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3183 return simplify_gen_binary (code
, mode
, tem
, op1
);
      /* (and X (ior (not X) Y)) -> (and X Y) */
3188 if (GET_CODE (op1
) == IOR
3189 && GET_CODE (XEXP (op1
, 0)) == NOT
3190 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3191 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3193 /* (and (ior (not X) Y) X) -> (and X Y) */
3194 if (GET_CODE (op0
) == IOR
3195 && GET_CODE (XEXP (op0
, 0)) == NOT
3196 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3197 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
      /* (and X (ior Y (not X))) -> (and X Y) */
3200 if (GET_CODE (op1
) == IOR
3201 && GET_CODE (XEXP (op1
, 1)) == NOT
3202 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3203 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3205 /* (and (ior Y (not X)) X) -> (and X Y) */
3206 if (GET_CODE (op0
) == IOR
3207 && GET_CODE (XEXP (op0
, 1)) == NOT
3208 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3209 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3211 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3215 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3221 /* 0/x is 0 (or x&0 if x has side-effects). */
3222 if (trueop0
== CONST0_RTX (mode
)
3223 && !cfun
->can_throw_non_call_exceptions
)
3225 if (side_effects_p (op1
))
3226 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3230 if (trueop1
== CONST1_RTX (mode
))
3232 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3236 /* Convert divide by power of two into shift. */
3237 if (CONST_INT_P (trueop1
)
3238 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3239 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
3243 /* Handle floating point and integers separately. */
3244 if (SCALAR_FLOAT_MODE_P (mode
))
3246 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3247 safe for modes with NaNs, since 0.0 / 0.0 will then be
3248 NaN rather than 0.0. Nor is it safe for modes with signed
3249 zeros, since dividing 0 by a negative number gives -0.0 */
3250 if (trueop0
== CONST0_RTX (mode
)
3251 && !HONOR_NANS (mode
)
3252 && !HONOR_SIGNED_ZEROS (mode
)
3253 && ! side_effects_p (op1
))
3256 if (trueop1
== CONST1_RTX (mode
)
3257 && !HONOR_SNANS (mode
))
3260 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3261 && trueop1
!= CONST0_RTX (mode
))
3263 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3266 if (real_equal (d1
, &dconstm1
)
3267 && !HONOR_SNANS (mode
))
3268 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3270 /* Change FP division by a constant into multiplication.
3271 Only do this with -freciprocal-math. */
3272 if (flag_reciprocal_math
3273 && !real_equal (d1
, &dconst0
))
3276 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3277 tem
= const_double_from_real_value (d
, mode
);
3278 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3282 else if (SCALAR_INT_MODE_P (mode
))
3284 /* 0/x is 0 (or x&0 if x has side-effects). */
3285 if (trueop0
== CONST0_RTX (mode
)
3286 && !cfun
->can_throw_non_call_exceptions
)
3288 if (side_effects_p (op1
))
3289 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3293 if (trueop1
== CONST1_RTX (mode
))
3295 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3300 if (trueop1
== constm1_rtx
)
3302 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3304 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3310 /* 0%x is 0 (or x&0 if x has side-effects). */
3311 if (trueop0
== CONST0_RTX (mode
))
3313 if (side_effects_p (op1
))
3314 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
      /* x%1 is 0 (or x&0 if x has side-effects).  */
3318 if (trueop1
== CONST1_RTX (mode
))
3320 if (side_effects_p (op0
))
3321 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3322 return CONST0_RTX (mode
);
3324 /* Implement modulus by power of two as AND. */
3325 if (CONST_INT_P (trueop1
)
3326 && exact_log2 (UINTVAL (trueop1
)) > 0)
3327 return simplify_gen_binary (AND
, mode
, op0
,
3328 gen_int_mode (INTVAL (op1
) - 1, mode
));
3332 /* 0%x is 0 (or x&0 if x has side-effects). */
3333 if (trueop0
== CONST0_RTX (mode
))
3335 if (side_effects_p (op1
))
3336 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3339 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3340 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3342 if (side_effects_p (op0
))
3343 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3344 return CONST0_RTX (mode
);
3350 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3351 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3352 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3354 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3355 if (CONST_INT_P (trueop1
)
3356 && IN_RANGE (INTVAL (trueop1
),
3357 GET_MODE_PRECISION (mode
) / 2 + (code
== ROTATE
),
3358 GET_MODE_PRECISION (mode
) - 1))
3359 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3360 mode
, op0
, GEN_INT (GET_MODE_PRECISION (mode
)
3361 - INTVAL (trueop1
)));
3365 if (trueop1
== CONST0_RTX (mode
))
3367 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3369 /* Rotating ~0 always results in ~0. */
3370 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3371 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3372 && ! side_effects_p (op1
))
3378 scalar constants c1, c2
3379 size (M2) > size (M1)
3380 c1 == size (M2) - size (M1)
3382 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3386 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3388 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
3389 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3391 && CONST_INT_P (op1
)
3392 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3393 && is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op0
)),
3395 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3396 && GET_MODE_BITSIZE (inner_mode
) > GET_MODE_BITSIZE (int_mode
)
3397 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3398 == GET_MODE_BITSIZE (inner_mode
) - GET_MODE_BITSIZE (int_mode
))
3399 && subreg_lowpart_p (op0
))
3401 rtx tmp
= GEN_INT (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3403 tmp
= simplify_gen_binary (code
, inner_mode
,
3404 XEXP (SUBREG_REG (op0
), 0),
3406 return lowpart_subreg (int_mode
, tmp
, inner_mode
);
3409 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3411 val
= INTVAL (op1
) & (GET_MODE_PRECISION (mode
) - 1);
3412 if (val
!= INTVAL (op1
))
3413 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3420 if (trueop1
== CONST0_RTX (mode
))
3422 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3424 goto canonicalize_shift
;
3427 if (trueop1
== CONST0_RTX (mode
))
3429 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3431 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3432 if (GET_CODE (op0
) == CLZ
3433 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (op0
, 0)), &inner_mode
)
3434 && CONST_INT_P (trueop1
)
3435 && STORE_FLAG_VALUE
== 1
3436 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3438 unsigned HOST_WIDE_INT zero_val
= 0;
3440 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode
, zero_val
)
3441 && zero_val
== GET_MODE_PRECISION (inner_mode
)
3442 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3443 return simplify_gen_relational (EQ
, mode
, inner_mode
,
3444 XEXP (op0
, 0), const0_rtx
);
3446 goto canonicalize_shift
;
3449 if (width
<= HOST_BITS_PER_WIDE_INT
3450 && mode_signbit_p (mode
, trueop1
)
3451 && ! side_effects_p (op0
))
3453 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3455 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3461 if (width
<= HOST_BITS_PER_WIDE_INT
3462 && CONST_INT_P (trueop1
)
3463 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3464 && ! side_effects_p (op0
))
3466 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3468 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3474 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3476 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3478 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3484 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3486 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3488 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3501 /* ??? There are simplifications that can be done. */
3505 if (!VECTOR_MODE_P (mode
))
3507 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3508 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3509 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3510 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3511 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3513 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3514 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3517 /* Extract a scalar element from a nested VEC_SELECT expression
3518 (with optional nested VEC_CONCAT expression). Some targets
3519 (i386) extract scalar element from a vector using chain of
3520 nested VEC_SELECT expressions. When input operand is a memory
3521 operand, this operation can be simplified to a simple scalar
3522 load from an offseted memory address. */
3523 if (GET_CODE (trueop0
) == VEC_SELECT
)
3525 rtx op0
= XEXP (trueop0
, 0);
3526 rtx op1
= XEXP (trueop0
, 1);
3528 machine_mode opmode
= GET_MODE (op0
);
3529 int elt_size
= GET_MODE_UNIT_SIZE (opmode
);
3530 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3532 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3538 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3539 gcc_assert (i
< n_elts
);
3541 /* Select element, pointed by nested selector. */
3542 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3544 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3545 if (GET_CODE (op0
) == VEC_CONCAT
)
3547 rtx op00
= XEXP (op0
, 0);
3548 rtx op01
= XEXP (op0
, 1);
3550 machine_mode mode00
, mode01
;
3551 int n_elts00
, n_elts01
;
3553 mode00
= GET_MODE (op00
);
3554 mode01
= GET_MODE (op01
);
3556 /* Find out number of elements of each operand. */
3557 if (VECTOR_MODE_P (mode00
))
3559 elt_size
= GET_MODE_UNIT_SIZE (mode00
);
3560 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3565 if (VECTOR_MODE_P (mode01
))
3567 elt_size
= GET_MODE_UNIT_SIZE (mode01
);
3568 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3573 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3575 /* Select correct operand of VEC_CONCAT
3576 and adjust selector. */
3577 if (elem
< n_elts01
)
3588 vec
= rtvec_alloc (1);
3589 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3591 tmp
= gen_rtx_fmt_ee (code
, mode
,
3592 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3595 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3596 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3597 return XEXP (trueop0
, 0);
3601 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3602 gcc_assert (GET_MODE_INNER (mode
)
3603 == GET_MODE_INNER (GET_MODE (trueop0
)));
3604 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3606 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3608 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
3609 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3610 rtvec v
= rtvec_alloc (n_elts
);
3613 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3614 for (i
= 0; i
< n_elts
; i
++)
3616 rtx x
= XVECEXP (trueop1
, 0, i
);
3618 gcc_assert (CONST_INT_P (x
));
3619 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3623 return gen_rtx_CONST_VECTOR (mode
, v
);
3626 /* Recognize the identity. */
3627 if (GET_MODE (trueop0
) == mode
)
3629 bool maybe_ident
= true;
3630 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3632 rtx j
= XVECEXP (trueop1
, 0, i
);
3633 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3635 maybe_ident
= false;
3643 /* If we build {a,b} then permute it, build the result directly. */
3644 if (XVECLEN (trueop1
, 0) == 2
3645 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3646 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3647 && GET_CODE (trueop0
) == VEC_CONCAT
3648 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3649 && GET_MODE (XEXP (trueop0
, 0)) == mode
3650 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3651 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3653 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3654 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3657 gcc_assert (i0
< 4 && i1
< 4);
3658 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3659 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3661 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3664 if (XVECLEN (trueop1
, 0) == 2
3665 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3666 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3667 && GET_CODE (trueop0
) == VEC_CONCAT
3668 && GET_MODE (trueop0
) == mode
)
3670 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3671 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3674 gcc_assert (i0
< 2 && i1
< 2);
3675 subop0
= XEXP (trueop0
, i0
);
3676 subop1
= XEXP (trueop0
, i1
);
3678 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3681 /* If we select one half of a vec_concat, return that. */
3682 if (GET_CODE (trueop0
) == VEC_CONCAT
3683 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3685 rtx subop0
= XEXP (trueop0
, 0);
3686 rtx subop1
= XEXP (trueop0
, 1);
3687 machine_mode mode0
= GET_MODE (subop0
);
3688 machine_mode mode1
= GET_MODE (subop1
);
3689 int li
= GET_MODE_UNIT_SIZE (mode0
);
3690 int l0
= GET_MODE_SIZE (mode0
) / li
;
3691 int l1
= GET_MODE_SIZE (mode1
) / li
;
3692 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3693 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3695 bool success
= true;
3696 for (int i
= 1; i
< l0
; ++i
)
3698 rtx j
= XVECEXP (trueop1
, 0, i
);
3699 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3708 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3710 bool success
= true;
3711 for (int i
= 1; i
< l1
; ++i
)
3713 rtx j
= XVECEXP (trueop1
, 0, i
);
3714 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3726 if (XVECLEN (trueop1
, 0) == 1
3727 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3728 && GET_CODE (trueop0
) == VEC_CONCAT
)
3731 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3733 /* Try to find the element in the VEC_CONCAT. */
3734 while (GET_MODE (vec
) != mode
3735 && GET_CODE (vec
) == VEC_CONCAT
)
3737 HOST_WIDE_INT vec_size
;
3739 if (CONST_INT_P (XEXP (vec
, 0)))
3741 /* vec_concat of two const_ints doesn't make sense with
3742 respect to modes. */
3743 if (CONST_INT_P (XEXP (vec
, 1)))
3746 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3747 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3750 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3752 if (offset
< vec_size
)
3753 vec
= XEXP (vec
, 0);
3757 vec
= XEXP (vec
, 1);
3759 vec
= avoid_constant_pool_reference (vec
);
3762 if (GET_MODE (vec
) == mode
)
3766 /* If we select elements in a vec_merge that all come from the same
3767 operand, select from that operand directly. */
3768 if (GET_CODE (op0
) == VEC_MERGE
)
3770 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3771 if (CONST_INT_P (trueop02
))
3773 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3774 bool all_operand0
= true;
3775 bool all_operand1
= true;
3776 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3778 rtx j
= XVECEXP (trueop1
, 0, i
);
3779 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
3780 all_operand1
= false;
3782 all_operand0
= false;
3784 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3785 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3786 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3787 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3791 /* If we have two nested selects that are inverses of each
3792 other, replace them with the source operand. */
3793 if (GET_CODE (trueop0
) == VEC_SELECT
3794 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3796 rtx op0_subop1
= XEXP (trueop0
, 1);
3797 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3798 gcc_assert (XVECLEN (trueop1
, 0) == GET_MODE_NUNITS (mode
));
3800 /* Apply the outer ordering vector to the inner one. (The inner
3801 ordering vector is expressly permitted to be of a different
3802 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3803 then the two VEC_SELECTs cancel. */
3804 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3806 rtx x
= XVECEXP (trueop1
, 0, i
);
3807 if (!CONST_INT_P (x
))
3809 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3810 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3813 return XEXP (trueop0
, 0);
3819 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3820 ? GET_MODE (trueop0
)
3821 : GET_MODE_INNER (mode
));
3822 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3823 ? GET_MODE (trueop1
)
3824 : GET_MODE_INNER (mode
));
3826 gcc_assert (VECTOR_MODE_P (mode
));
3827 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3828 == GET_MODE_SIZE (mode
));
3830 if (VECTOR_MODE_P (op0_mode
))
3831 gcc_assert (GET_MODE_INNER (mode
)
3832 == GET_MODE_INNER (op0_mode
));
3834 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3836 if (VECTOR_MODE_P (op1_mode
))
3837 gcc_assert (GET_MODE_INNER (mode
)
3838 == GET_MODE_INNER (op1_mode
));
3840 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3842 if ((GET_CODE (trueop0
) == CONST_VECTOR
3843 || CONST_SCALAR_INT_P (trueop0
)
3844 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3845 && (GET_CODE (trueop1
) == CONST_VECTOR
3846 || CONST_SCALAR_INT_P (trueop1
)
3847 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3849 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
3850 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3851 rtvec v
= rtvec_alloc (n_elts
);
3853 unsigned in_n_elts
= 1;
3855 if (VECTOR_MODE_P (op0_mode
))
3856 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3857 for (i
= 0; i
< n_elts
; i
++)
3861 if (!VECTOR_MODE_P (op0_mode
))
3862 RTVEC_ELT (v
, i
) = trueop0
;
3864 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3868 if (!VECTOR_MODE_P (op1_mode
))
3869 RTVEC_ELT (v
, i
) = trueop1
;
3871 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3876 return gen_rtx_CONST_VECTOR (mode
, v
);
/* Try to merge two VEC_SELECTs from the same vector into a single one.
   Restrict the transformation to avoid generating a VEC_SELECT with a
   mode unrelated to its operand.  */
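/* As an illustration (with example modes), a vec_concat of
   (vec_select:V2SI x (parallel [0 1])) and
   (vec_select:V2SI x (parallel [3 2])) can become
   (vec_select:V4SI x (parallel [0 1 3 2])) when x has mode V4SImode,
   i.e. the same mode as the result.  */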
3882 if (GET_CODE (trueop0
) == VEC_SELECT
3883 && GET_CODE (trueop1
) == VEC_SELECT
3884 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3885 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3887 rtx par0
= XEXP (trueop0
, 1);
3888 rtx par1
= XEXP (trueop1
, 1);
3889 int len0
= XVECLEN (par0
, 0);
3890 int len1
= XVECLEN (par1
, 0);
3891 rtvec vec
= rtvec_alloc (len0
+ len1
);
3892 for (int i
= 0; i
< len0
; i
++)
3893 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3894 for (int i
= 0; i
< len1
; i
++)
3895 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3896 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3897 gen_rtx_PARALLEL (VOIDmode
, vec
));
3910 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
3913 if (VECTOR_MODE_P (mode
)
3914 && code
!= VEC_CONCAT
3915 && GET_CODE (op0
) == CONST_VECTOR
3916 && GET_CODE (op1
) == CONST_VECTOR
)
3918 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3919 machine_mode op0mode
= GET_MODE (op0
);
3920 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3921 machine_mode op1mode
= GET_MODE (op1
);
3922 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3923 rtvec v
= rtvec_alloc (n_elts
);
3926 gcc_assert (op0_n_elts
== n_elts
);
3927 gcc_assert (op1_n_elts
== n_elts
);
3928 for (i
= 0; i
< n_elts
; i
++)
3930 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3931 CONST_VECTOR_ELT (op0
, i
),
3932 CONST_VECTOR_ELT (op1
, i
));
3935 RTVEC_ELT (v
, i
) = x
;
3938 return gen_rtx_CONST_VECTOR (mode
, v
);
3941 if (VECTOR_MODE_P (mode
)
3942 && code
== VEC_CONCAT
3943 && (CONST_SCALAR_INT_P (op0
)
3944 || GET_CODE (op0
) == CONST_FIXED
3945 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3946 && (CONST_SCALAR_INT_P (op1
)
3947 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3948 || GET_CODE (op1
) == CONST_FIXED
))
3950 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3951 rtvec v
= rtvec_alloc (n_elts
);
3953 gcc_assert (n_elts
>= 2);
3956 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3957 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3959 RTVEC_ELT (v
, 0) = op0
;
3960 RTVEC_ELT (v
, 1) = op1
;
3964 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3965 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3968 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3969 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3970 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3972 for (i
= 0; i
< op0_n_elts
; ++i
)
3973 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3974 for (i
= 0; i
< op1_n_elts
; ++i
)
3975 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3978 return gen_rtx_CONST_VECTOR (mode
, v
);
3981 if (SCALAR_FLOAT_MODE_P (mode
)
3982 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3983 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3984 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3995 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3997 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3999 for (i
= 0; i
< 4; i
++)
4016 real_from_target (&r
, tmp0
, mode
);
4017 return const_double_from_real_value (r
, mode
);
4021 REAL_VALUE_TYPE f0
, f1
, value
, result
;
4022 const REAL_VALUE_TYPE
*opr0
, *opr1
;
4025 opr0
= CONST_DOUBLE_REAL_VALUE (op0
);
4026 opr1
= CONST_DOUBLE_REAL_VALUE (op1
);
4028 if (HONOR_SNANS (mode
)
4029 && (REAL_VALUE_ISSIGNALING_NAN (*opr0
)
4030 || REAL_VALUE_ISSIGNALING_NAN (*opr1
)))
4033 real_convert (&f0
, mode
, opr0
);
4034 real_convert (&f1
, mode
, opr1
);
4037 && real_equal (&f1
, &dconst0
)
4038 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
4041 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4042 && flag_trapping_math
4043 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
4045 int s0
= REAL_VALUE_NEGATIVE (f0
);
4046 int s1
= REAL_VALUE_NEGATIVE (f1
);
4051 /* Inf + -Inf = NaN plus exception. */
4056 /* Inf - Inf = NaN plus exception. */
4061 /* Inf / Inf = NaN plus exception. */
4068 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
4069 && flag_trapping_math
4070 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
4071 || (REAL_VALUE_ISINF (f1
)
4072 && real_equal (&f0
, &dconst0
))))
4073 /* Inf * 0 = NaN plus exception. */
4076 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
4078 real_convert (&result
, mode
, &value
);
4080 /* Don't constant fold this floating point operation if
4081 the result has overflowed and flag_trapping_math. */
4083 if (flag_trapping_math
4084 && MODE_HAS_INFINITIES (mode
)
4085 && REAL_VALUE_ISINF (result
)
4086 && !REAL_VALUE_ISINF (f0
)
4087 && !REAL_VALUE_ISINF (f1
))
4088 /* Overflow plus exception. */
/* Don't constant fold this floating point operation if the
   result may depend upon the run-time rounding mode and
   flag_rounding_math is set, or if GCC's software emulation
   is unable to accurately represent the result.  */
4096 if ((flag_rounding_math
4097 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
4098 && (inexact
|| !real_identical (&result
, &value
)))
4101 return const_double_from_real_value (result
, mode
);
4105 /* We can fold some multi-word operations. */
4106 scalar_int_mode int_mode
;
4107 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
4108 && CONST_SCALAR_INT_P (op0
)
4109 && CONST_SCALAR_INT_P (op1
))
4113 rtx_mode_t pop0
= rtx_mode_t (op0
, int_mode
);
4114 rtx_mode_t pop1
= rtx_mode_t (op1
, int_mode
);
4116 #if TARGET_SUPPORTS_WIDE_INT == 0
/* This assert keeps the simplification from producing a result
   that cannot be represented in a CONST_DOUBLE, but a lot of
   upstream callers expect that this function never fails to
   simplify something, and so if you added this to the test
   above the code would die later anyway.  If this assert
   happens, you just need to make the port support wide int.  */
4123 gcc_assert (GET_MODE_PRECISION (int_mode
) <= HOST_BITS_PER_DOUBLE_INT
);
4128 result
= wi::sub (pop0
, pop1
);
4132 result
= wi::add (pop0
, pop1
);
4136 result
= wi::mul (pop0
, pop1
);
4140 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4146 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
4152 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4158 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
4164 result
= wi::bit_and (pop0
, pop1
);
4168 result
= wi::bit_or (pop0
, pop1
);
4172 result
= wi::bit_xor (pop0
, pop1
);
4176 result
= wi::smin (pop0
, pop1
);
4180 result
= wi::smax (pop0
, pop1
);
4184 result
= wi::umin (pop0
, pop1
);
4188 result
= wi::umax (pop0
, pop1
);
4195 wide_int wop1
= pop1
;
4196 if (SHIFT_COUNT_TRUNCATED
)
4197 wop1
= wi::umod_trunc (wop1
, GET_MODE_PRECISION (int_mode
));
4198 else if (wi::geu_p (wop1
, GET_MODE_PRECISION (int_mode
)))
4204 result
= wi::lrshift (pop0
, wop1
);
4208 result
= wi::arshift (pop0
, wop1
);
4212 result
= wi::lshift (pop0
, wop1
);
4223 if (wi::neg_p (pop1
))
4229 result
= wi::lrotate (pop0
, pop1
);
4233 result
= wi::rrotate (pop0
, pop1
);
4244 return immed_wide_int_const (result
, int_mode
);
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */
4256 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4260 result
= (commutative_operand_precedence (y
)
4261 - commutative_operand_precedence (x
));
4263 return result
+ result
;
4265 /* Group together equal REGs to do more simplification. */
4266 if (REG_P (x
) && REG_P (y
))
4267 return REGNO (x
) > REGNO (y
);
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */
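/* As an illustrative example of the kind of rewrite performed here,
   (plus (plus (reg A) (const_int 4)) (minus (reg B) (reg A))) is
   flattened into the terms { A, 4, B, -A }; the A and -A terms cancel
   and the expression is rebuilt as (plus (reg B) (const_int 4)).  */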
4282 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4285 struct simplify_plus_minus_op_data
4292 int changed
, n_constants
, canonicalized
= 0;
4295 memset (ops
, 0, sizeof ops
);
/* Set up the two operands and then expand them until nothing has been
   changed.  If we run out of room in our array, give up; this should
   almost never happen.  */
4304 ops
[1].neg
= (code
== MINUS
);
4311 for (i
= 0; i
< n_ops
; i
++)
4313 rtx this_op
= ops
[i
].op
;
4314 int this_neg
= ops
[i
].neg
;
4315 enum rtx_code this_code
= GET_CODE (this_op
);
4321 if (n_ops
== ARRAY_SIZE (ops
))
4324 ops
[n_ops
].op
= XEXP (this_op
, 1);
4325 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4328 ops
[i
].op
= XEXP (this_op
, 0);
/* If this operand was negated then we will potentially
   canonicalize the expression.  Similarly if we don't
   place the operands adjacent we're re-ordering the
   expression and thus might be performing a
   canonicalization.  Ignore register re-ordering.
   ??? It might be better to shuffle the ops array here,
   but then (plus (plus (A, B), plus (C, D))) wouldn't
   be seen as non-canonical.  */
4340 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
4345 ops
[i
].op
= XEXP (this_op
, 0);
4346 ops
[i
].neg
= ! this_neg
;
4352 if (n_ops
!= ARRAY_SIZE (ops
)
4353 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4354 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4355 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4357 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4358 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4359 ops
[n_ops
].neg
= this_neg
;
4367 /* ~a -> (-a - 1) */
4368 if (n_ops
!= ARRAY_SIZE (ops
))
4370 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4371 ops
[n_ops
++].neg
= this_neg
;
4372 ops
[i
].op
= XEXP (this_op
, 0);
4373 ops
[i
].neg
= !this_neg
;
4383 ops
[i
].op
= neg_const_int (mode
, this_op
);
4397 if (n_constants
> 1)
4400 gcc_assert (n_ops
>= 2);
4402 /* If we only have two operands, we can avoid the loops. */
4405 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4408 /* Get the two operands. Be careful with the order, especially for
4409 the cases where code == MINUS. */
4410 if (ops
[0].neg
&& ops
[1].neg
)
4412 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4415 else if (ops
[0].neg
)
4426 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4429 /* Now simplify each pair of operands until nothing changes. */
4432 /* Insertion sort is good enough for a small array. */
4433 for (i
= 1; i
< n_ops
; i
++)
4435 struct simplify_plus_minus_op_data save
;
4439 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
4442 /* Just swapping registers doesn't count as canonicalization. */
4448 ops
[j
+ 1] = ops
[j
];
4450 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
4455 for (i
= n_ops
- 1; i
> 0; i
--)
4456 for (j
= i
- 1; j
>= 0; j
--)
4458 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4459 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4461 if (lhs
!= 0 && rhs
!= 0)
4463 enum rtx_code ncode
= PLUS
;
4469 std::swap (lhs
, rhs
);
4471 else if (swap_commutative_operands_p (lhs
, rhs
))
4472 std::swap (lhs
, rhs
);
4474 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4475 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4477 rtx tem_lhs
, tem_rhs
;
4479 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4480 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4481 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
4484 if (tem
&& !CONSTANT_P (tem
))
4485 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4488 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
/* Reject "simplifications" that just wrap the two
   arguments in a CONST.  Failure to do so can result
   in infinite recursion with simplify_binary_operation
   when it calls us to simplify CONST operations.
   Also, if we find such a simplification, don't try
   any more combinations with this rhs: we must have
   something like symbol+offset, i.e. one of the
   trivial CONST expressions we handle later.  */
4500 if (GET_CODE (tem
) == CONST
4501 && GET_CODE (XEXP (tem
, 0)) == ncode
4502 && XEXP (XEXP (tem
, 0), 0) == lhs
4503 && XEXP (XEXP (tem
, 0), 1) == rhs
)
4506 if (GET_CODE (tem
) == NEG
)
4507 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4508 if (CONST_INT_P (tem
) && lneg
)
4509 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4513 ops
[j
].op
= NULL_RTX
;
4523 /* Pack all the operands to the lower-numbered entries. */
4524 for (i
= 0, j
= 0; j
< n_ops
; j
++)
/* If nothing changed, check whether rematerialization of rtl instructions
   is still required.  */
/* Perform rematerialization only if all operands are registers and
   all operations are PLUS.  */
/* ??? Also disallow (non-global, non-frame) fixed registers to work
   around rs6000 and how it uses the CA register.  See PR67145.  */
4541 for (i
= 0; i
< n_ops
; i
++)
4543 || !REG_P (ops
[i
].op
)
4544 || (REGNO (ops
[i
].op
) < FIRST_PSEUDO_REGISTER
4545 && fixed_regs
[REGNO (ops
[i
].op
)]
4546 && !global_regs
[REGNO (ops
[i
].op
)]
4547 && ops
[i
].op
!= frame_pointer_rtx
4548 && ops
[i
].op
!= arg_pointer_rtx
4549 && ops
[i
].op
!= stack_pointer_rtx
))
4554 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4556 && CONST_INT_P (ops
[1].op
)
4557 && CONSTANT_P (ops
[0].op
)
4559 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
/* We suppressed creation of trivial CONST expressions in the
   combination loop to avoid recursion.  Create one manually now.
   The combination loop should have ensured that there is exactly
   one CONST_INT, and the sort will have ensured that it is last
   in the array and that any other constant will be next-to-last.  */
4568 && CONST_INT_P (ops
[n_ops
- 1].op
)
4569 && CONSTANT_P (ops
[n_ops
- 2].op
))
4571 rtx value
= ops
[n_ops
- 1].op
;
4572 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4573 value
= neg_const_int (mode
, value
);
4574 if (CONST_INT_P (value
))
4576 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4582 /* Put a non-negated operand first, if possible. */
4584 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4587 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4596 /* Now make the result by performing the requested operations. */
4599 for (i
= 1; i
< n_ops
; i
++)
4600 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4601 mode
, result
, ops
[i
].op
);
4606 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4608 plus_minus_operand_p (const_rtx x
)
4610 return GET_CODE (x
) == PLUS
4611 || GET_CODE (x
) == MINUS
4612 || (GET_CODE (x
) == CONST
4613 && GET_CODE (XEXP (x
, 0)) == PLUS
4614 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4615 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
4627 simplify_relational_operation (enum rtx_code code
, machine_mode mode
,
4628 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4630 rtx tem
, trueop0
, trueop1
;
4632 if (cmp_mode
== VOIDmode
)
4633 cmp_mode
= GET_MODE (op0
);
4634 if (cmp_mode
== VOIDmode
)
4635 cmp_mode
= GET_MODE (op1
);
4637 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4640 if (SCALAR_FLOAT_MODE_P (mode
))
4642 if (tem
== const0_rtx
)
4643 return CONST0_RTX (mode
);
4644 #ifdef FLOAT_STORE_FLAG_VALUE
4646 REAL_VALUE_TYPE val
;
4647 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4648 return const_double_from_real_value (val
, mode
);
4654 if (VECTOR_MODE_P (mode
))
4656 if (tem
== const0_rtx
)
4657 return CONST0_RTX (mode
);
4658 #ifdef VECTOR_STORE_FLAG_VALUE
4663 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4664 if (val
== NULL_RTX
)
4666 if (val
== const1_rtx
)
4667 return CONST1_RTX (mode
);
4669 units
= GET_MODE_NUNITS (mode
);
4670 v
= rtvec_alloc (units
);
4671 for (i
= 0; i
< units
; i
++)
4672 RTVEC_ELT (v
, i
) = val
;
4673 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
4683 /* For the following tests, ensure const0_rtx is op1. */
4684 if (swap_commutative_operands_p (op0
, op1
)
4685 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4686 std::swap (op0
, op1
), code
= swap_condition (code
);
4688 /* If op0 is a compare, extract the comparison arguments from it. */
4689 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4690 return simplify_gen_relational (code
, mode
, VOIDmode
,
4691 XEXP (op0
, 0), XEXP (op0
, 1));
4693 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4697 trueop0
= avoid_constant_pool_reference (op0
);
4698 trueop1
= avoid_constant_pool_reference (op1
);
4699 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
4710 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
4711 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4713 enum rtx_code op0code
= GET_CODE (op0
);
4715 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4717 /* If op0 is a comparison, extract the comparison arguments
4721 if (GET_MODE (op0
) == mode
)
4722 return simplify_rtx (op0
);
4724 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4725 XEXP (op0
, 0), XEXP (op0
, 1));
4727 else if (code
== EQ
)
4729 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL
);
4730 if (new_code
!= UNKNOWN
)
4731 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4732 XEXP (op0
, 0), XEXP (op0
, 1));
/* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
   (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
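/* For example (modes chosen arbitrarily),
   (ltu:SI (plus:SI (reg:SI 1) (const_int 4)) (const_int 4))
   becomes (geu:SI (reg:SI 1) (const_int -4)): the sum is less than
   the addend exactly when the addition wrapped around.  */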
4738 if ((code
== LTU
|| code
== GEU
)
4739 && GET_CODE (op0
) == PLUS
4740 && CONST_INT_P (XEXP (op0
, 1))
4741 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4742 || rtx_equal_p (op1
, XEXP (op0
, 1)))
4743 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4744 && XEXP (op0
, 1) != const0_rtx
)
4747 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4748 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4749 cmp_mode
, XEXP (op0
, 0), new_cmp
);
/* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
   transformed into (LTU a -C).  */
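/* For example, with C == 4,
   (gtu:SI (plus:SI (reg:SI 1) (const_int 4)) (const_int 3))
   becomes (ltu:SI (reg:SI 1) (const_int -4)), since the sum exceeds
   C - 1 exactly when the addition did not wrap around.  */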
4754 if (code
== GTU
&& GET_CODE (op0
) == PLUS
&& CONST_INT_P (op1
)
4755 && CONST_INT_P (XEXP (op0
, 1))
4756 && (UINTVAL (op1
) == UINTVAL (XEXP (op0
, 1)) - 1)
4757 && XEXP (op0
, 1) != const0_rtx
)
4760 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4761 return simplify_gen_relational (LTU
, mode
, cmp_mode
,
4762 XEXP (op0
, 0), new_cmp
);
4765 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4766 if ((code
== LTU
|| code
== GEU
)
4767 && GET_CODE (op0
) == PLUS
4768 && rtx_equal_p (op1
, XEXP (op0
, 1))
4769 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4770 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4771 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4772 copy_rtx (XEXP (op0
, 0)));
4774 if (op1
== const0_rtx
)
4776 /* Canonicalize (GTU x 0) as (NE x 0). */
4778 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4779 /* Canonicalize (LEU x 0) as (EQ x 0). */
4781 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4783 else if (op1
== const1_rtx
)
4788 /* Canonicalize (GE x 1) as (GT x 0). */
4789 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4792 /* Canonicalize (GEU x 1) as (NE x 0). */
4793 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4796 /* Canonicalize (LT x 1) as (LE x 0). */
4797 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4800 /* Canonicalize (LTU x 1) as (EQ x 0). */
4801 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4807 else if (op1
== constm1_rtx
)
4809 /* Canonicalize (LE x -1) as (LT x 0). */
4811 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4812 /* Canonicalize (GT x -1) as (GE x 0). */
4814 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
/* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
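/* For instance, (eq (plus x (const_int 3)) (const_int 10)) becomes
   (eq x (const_int 7)) once the constant difference is folded.  */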
4818 if ((code
== EQ
|| code
== NE
)
4819 && (op0code
== PLUS
|| op0code
== MINUS
)
4821 && CONSTANT_P (XEXP (op0
, 1))
4822 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4824 rtx x
= XEXP (op0
, 0);
4825 rtx c
= XEXP (op0
, 1);
4826 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4827 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
/* Detect an infinite recursive condition, where we oscillate at this
   simplification case between:
	A + B == C  <--->  C - B == A,
   where A, B, and C are all constants with non-simplifiable expressions,
   usually SYMBOL_REFs.  */
4834 if (GET_CODE (tem
) == invcode
4836 && rtx_equal_p (c
, XEXP (tem
, 1)))
4839 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
4842 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4843 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4844 scalar_int_mode int_mode
, int_cmp_mode
;
4846 && op1
== const0_rtx
4847 && is_int_mode (mode
, &int_mode
)
4848 && is_a
<scalar_int_mode
> (cmp_mode
, &int_cmp_mode
)
4849 /* ??? Work-around BImode bugs in the ia64 backend. */
4850 && int_mode
!= BImode
4851 && int_cmp_mode
!= BImode
4852 && nonzero_bits (op0
, int_cmp_mode
) == 1
4853 && STORE_FLAG_VALUE
== 1)
4854 return GET_MODE_SIZE (int_mode
) > GET_MODE_SIZE (int_cmp_mode
)
4855 ? simplify_gen_unary (ZERO_EXTEND
, int_mode
, op0
, int_cmp_mode
)
4856 : lowpart_subreg (int_mode
, op0
, int_cmp_mode
);
4858 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4859 if ((code
== EQ
|| code
== NE
)
4860 && op1
== const0_rtx
4862 return simplify_gen_relational (code
, mode
, cmp_mode
,
4863 XEXP (op0
, 0), XEXP (op0
, 1));
4865 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4866 if ((code
== EQ
|| code
== NE
)
4868 && rtx_equal_p (XEXP (op0
, 0), op1
)
4869 && !side_effects_p (XEXP (op0
, 0)))
4870 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
4873 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4874 if ((code
== EQ
|| code
== NE
)
4876 && rtx_equal_p (XEXP (op0
, 1), op1
)
4877 && !side_effects_p (XEXP (op0
, 1)))
4878 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4881 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4882 if ((code
== EQ
|| code
== NE
)
4884 && CONST_SCALAR_INT_P (op1
)
4885 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
4886 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4887 simplify_gen_binary (XOR
, cmp_mode
,
4888 XEXP (op0
, 1), op1
));
/* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
   can be implemented with a BICS instruction on some targets, or
   constant-folded if y is a constant.  */
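/* For example, the test that x fits in the low eight bits of SImode,
   (eq (and x (const_int 255)) x), becomes a comparison of
   (and x (const_int -256)) against zero once the NOT of the constant
   mask has been folded.  */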
4893 if ((code
== EQ
|| code
== NE
)
4895 && rtx_equal_p (XEXP (op0
, 0), op1
)
4896 && !side_effects_p (op1
)
4897 && op1
!= CONST0_RTX (cmp_mode
))
4899 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4900 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
4902 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
4903 CONST0_RTX (cmp_mode
));
4906 /* Likewise for (eq/ne (and x y) y). */
4907 if ((code
== EQ
|| code
== NE
)
4909 && rtx_equal_p (XEXP (op0
, 1), op1
)
4910 && !side_effects_p (op1
)
4911 && op1
!= CONST0_RTX (cmp_mode
))
4913 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0), cmp_mode
);
4914 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
4916 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
4917 CONST0_RTX (cmp_mode
));
/* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
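/* For instance, (eq:SI (bswap:SI x) (const_int 0x12345678)) becomes
   (eq:SI x (const_int 0x78563412)), byte-swapping the constant
   instead of the register.  */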
4921 if ((code
== EQ
|| code
== NE
)
4922 && GET_CODE (op0
) == BSWAP
4923 && CONST_SCALAR_INT_P (op1
))
4924 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4925 simplify_gen_unary (BSWAP
, cmp_mode
,
4928 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4929 if ((code
== EQ
|| code
== NE
)
4930 && GET_CODE (op0
) == BSWAP
4931 && GET_CODE (op1
) == BSWAP
)
4932 return simplify_gen_relational (code
, mode
, cmp_mode
,
4933 XEXP (op0
, 0), XEXP (op1
, 0));
4935 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
4941 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4942 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
4943 XEXP (op0
, 0), const0_rtx
);
4948 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4949 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
4950 XEXP (op0
, 0), const0_rtx
);
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */
4976 comparison_result (enum rtx_code code
, int known_results
)
4982 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
4985 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
4989 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4992 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4996 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4999 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
5002 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
5004 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
5007 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
5009 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
5012 return const_true_rtx
;
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */
5027 simplify_const_relational_operation (enum rtx_code code
,
5035 gcc_assert (mode
!= VOIDmode
5036 || (GET_MODE (op0
) == VOIDmode
5037 && GET_MODE (op1
) == VOIDmode
));
5039 /* If op0 is a compare, extract the comparison arguments from it. */
5040 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
5042 op1
= XEXP (op0
, 1);
5043 op0
= XEXP (op0
, 0);
5045 if (GET_MODE (op0
) != VOIDmode
)
5046 mode
= GET_MODE (op0
);
5047 else if (GET_MODE (op1
) != VOIDmode
)
5048 mode
= GET_MODE (op1
);
5053 /* We can't simplify MODE_CC values since we don't know what the
5054 actual comparison is. */
5055 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
5058 /* Make sure the constant is second. */
5059 if (swap_commutative_operands_p (op0
, op1
))
5061 std::swap (op0
, op1
);
5062 code
= swap_condition (code
);
5065 trueop0
= avoid_constant_pool_reference (op0
);
5066 trueop1
= avoid_constant_pool_reference (op1
);
/* For integer comparisons of A and B maybe we can simplify A - B and can
   then simplify a comparison of that with zero.  If A and B are both either
   a register or a CONST_INT, this can't help; testing for these cases will
   prevent infinite recursion here and speed things up.

   We can only do this for EQ and NE comparisons as otherwise we may
   lose or introduce overflow which we cannot disregard as undefined as
   we do not know the signedness of the operation on either the left or
   the right hand side of the comparison.  */
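/* The EQ/NE restriction matters because the subtraction can wrap:
   e.g. in SImode with A == INT_MIN and B == 1, A < B is true but
   A - B overflows to INT_MAX, so (lt (minus A B) 0) would give the
   wrong answer, whereas equality with zero is unaffected by
   wrap-around.  */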
5078 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
5079 && (code
== EQ
|| code
== NE
)
5080 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
5081 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
5082 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
5083 /* We cannot do this if tem is a nonzero address. */
5084 && ! nonzero_address_p (tem
))
5085 return simplify_const_relational_operation (signed_condition (code
),
5086 mode
, tem
, const0_rtx
);
5088 if (! HONOR_NANS (mode
) && code
== ORDERED
)
5089 return const_true_rtx
;
5091 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
5094 /* For modes without NaNs, if the two operands are equal, we know the
5095 result except if they have side-effects. Even with NaNs we know
5096 the result of unordered comparisons and, if signaling NaNs are
5097 irrelevant, also the result of LT/GT/LTGT. */
5098 if ((! HONOR_NANS (trueop0
)
5099 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
5100 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
5101 && ! HONOR_SNANS (trueop0
)))
5102 && rtx_equal_p (trueop0
, trueop1
)
5103 && ! side_effects_p (trueop0
))
5104 return comparison_result (code
, CMP_EQ
);
5106 /* If the operands are floating-point constants, see if we can fold
5108 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5109 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5110 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5112 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
5113 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
5115 /* Comparisons are unordered iff at least one of the values is NaN. */
5116 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
5126 return const_true_rtx
;
5139 return comparison_result (code
,
5140 (real_equal (d0
, d1
) ? CMP_EQ
:
5141 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
5144 /* Otherwise, see if the operands are both integers. */
5145 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
5146 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
/* It would be nice if we really had a mode here.  However, the
   largest int representable on the target is as good as
   infinite.  */
5151 machine_mode cmode
= (mode
== VOIDmode
) ? MAX_MODE_INT
: mode
;
5152 rtx_mode_t ptrueop0
= rtx_mode_t (trueop0
, cmode
);
5153 rtx_mode_t ptrueop1
= rtx_mode_t (trueop1
, cmode
);
5155 if (wi::eq_p (ptrueop0
, ptrueop1
))
5156 return comparison_result (code
, CMP_EQ
);
5159 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
5160 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
5161 return comparison_result (code
, cr
);
5165 /* Optimize comparisons with upper and lower bounds. */
5166 scalar_int_mode int_mode
;
5167 if (CONST_INT_P (trueop1
)
5168 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5169 && HWI_COMPUTABLE_MODE_P (int_mode
)
5170 && !side_effects_p (trueop0
))
5173 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, int_mode
);
5174 HOST_WIDE_INT val
= INTVAL (trueop1
);
5175 HOST_WIDE_INT mmin
, mmax
;
5185 /* Get a reduced range if the sign bit is zero. */
5186 if (nonzero
<= (GET_MODE_MASK (int_mode
) >> 1))
5193 rtx mmin_rtx
, mmax_rtx
;
5194 get_mode_bounds (int_mode
, sign
, int_mode
, &mmin_rtx
, &mmax_rtx
);
5196 mmin
= INTVAL (mmin_rtx
);
5197 mmax
= INTVAL (mmax_rtx
);
5200 unsigned int sign_copies
5201 = num_sign_bit_copies (trueop0
, int_mode
);
5203 mmin
>>= (sign_copies
- 1);
5204 mmax
>>= (sign_copies
- 1);
5210 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5212 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5213 return const_true_rtx
;
5214 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5219 return const_true_rtx
;
5224 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5226 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5227 return const_true_rtx
;
5228 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5233 return const_true_rtx
;
5239 /* x == y is always false for y out of range. */
5240 if (val
< mmin
|| val
> mmax
)
5244 /* x > y is always false for y >= mmax, always true for y < mmin. */
5246 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5248 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5249 return const_true_rtx
;
5255 return const_true_rtx
;
5258 /* x < y is always false for y <= mmin, always true for y > mmax. */
5260 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5262 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5263 return const_true_rtx
;
5269 return const_true_rtx
;
5273 /* x != y is always true for y out of range. */
5274 if (val
< mmin
|| val
> mmax
)
5275 return const_true_rtx
;
5283 /* Optimize integer comparisons with zero. */
5284 if (is_a
<scalar_int_mode
> (mode
, &int_mode
)
5285 && trueop1
== const0_rtx
5286 && !side_effects_p (trueop0
))
5288 /* Some addresses are known to be nonzero. We don't know
5289 their sign, but equality comparisons are known. */
5290 if (nonzero_address_p (trueop0
))
5292 if (code
== EQ
|| code
== LEU
)
5294 if (code
== NE
|| code
== GTU
)
5295 return const_true_rtx
;
5298 /* See if the first operand is an IOR with a constant. If so, we
5299 may be able to determine the result of this comparison. */
5300 if (GET_CODE (op0
) == IOR
)
5302 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5303 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5305 int sign_bitnum
= GET_MODE_PRECISION (int_mode
) - 1;
5306 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5307 && (UINTVAL (inner_const
)
5318 return const_true_rtx
;
5322 return const_true_rtx
;
5336 /* Optimize comparison of ABS with zero. */
5337 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
5338 && (GET_CODE (trueop0
) == ABS
5339 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5340 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5345 /* Optimize abs(x) < 0.0. */
5346 if (!INTEGRAL_MODE_P (mode
) && !HONOR_SNANS (mode
))
5351 /* Optimize abs(x) >= 0.0. */
5352 if (!INTEGRAL_MODE_P (mode
) && !HONOR_NANS (mode
))
5353 return const_true_rtx
;
5357 /* Optimize ! (abs(x) < 0.0). */
5358 return const_true_rtx
;
5368 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5369 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5370 or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
5371 can be simplified to that or NULL_RTX if not.
5372 Assume X is compared against zero with CMP_CODE and the true
5373 arm is TRUE_VAL and the false arm is FALSE_VAL. */
5376 simplify_cond_clz_ctz (rtx x
, rtx_code cmp_code
, rtx true_val
, rtx false_val
)
5378 if (cmp_code
!= EQ
&& cmp_code
!= NE
)
5381 /* Result on X == 0 and X !=0 respectively. */
5382 rtx on_zero
, on_nonzero
;
5386 on_nonzero
= false_val
;
5390 on_zero
= false_val
;
5391 on_nonzero
= true_val
;
5394 rtx_code op_code
= GET_CODE (on_nonzero
);
5395 if ((op_code
!= CLZ
&& op_code
!= CTZ
)
5396 || !rtx_equal_p (XEXP (on_nonzero
, 0), x
)
5397 || !CONST_INT_P (on_zero
))
5400 HOST_WIDE_INT op_val
;
5401 scalar_int_mode mode ATTRIBUTE_UNUSED
5402 = as_a
<scalar_int_mode
> (GET_MODE (XEXP (on_nonzero
, 0)));
5403 if (((op_code
== CLZ
&& CLZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
))
5404 || (op_code
== CTZ
&& CTZ_DEFINED_VALUE_AT_ZERO (mode
, op_val
)))
5405 && op_val
== INTVAL (on_zero
))
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
5417 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5418 machine_mode op0_mode
, rtx op0
, rtx op1
,
5421 bool any_change
= false;
5423 scalar_int_mode int_mode
, int_op0_mode
;
5428 /* Simplify negations around the multiplication. */
5429 /* -a * -b + c => a * b + c. */
5430 if (GET_CODE (op0
) == NEG
)
5432 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5434 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5436 else if (GET_CODE (op1
) == NEG
)
5438 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5440 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5443 /* Canonicalize the two multiplication operands. */
5444 /* a * -b + c => -b * a + c. */
5445 if (swap_commutative_operands_p (op0
, op1
))
5446 std::swap (op0
, op1
), any_change
= true;
5449 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5454 if (CONST_INT_P (op0
)
5455 && CONST_INT_P (op1
)
5456 && CONST_INT_P (op2
)
5457 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
5458 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_PRECISION (int_mode
)
5459 && HWI_COMPUTABLE_MODE_P (int_mode
))
5461 /* Extracting a bit-field from a constant */
5462 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5463 HOST_WIDE_INT op1val
= INTVAL (op1
);
5464 HOST_WIDE_INT op2val
= INTVAL (op2
);
5465 if (!BITS_BIG_ENDIAN
)
5467 else if (is_a
<scalar_int_mode
> (op0_mode
, &int_op0_mode
))
5468 val
>>= GET_MODE_PRECISION (int_op0_mode
) - op2val
- op1val
;
5470 /* Not enough information to calculate the bit position. */
5473 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5475 /* First zero-extend. */
5476 val
&= (HOST_WIDE_INT_1U
<< op1val
) - 1;
5477 /* If desired, propagate sign bit. */
5478 if (code
== SIGN_EXTRACT
5479 && (val
& (HOST_WIDE_INT_1U
<< (op1val
- 1)))
5481 val
|= ~ ((HOST_WIDE_INT_1U
<< op1val
) - 1);
5484 return gen_int_mode (val
, int_mode
);
5489 if (CONST_INT_P (op0
))
5490 return op0
!= const0_rtx
? op1
: op2
;
5492 /* Convert c ? a : a into "a". */
5493 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5496 /* Convert a != b ? a : b into "a". */
5497 if (GET_CODE (op0
) == NE
5498 && ! side_effects_p (op0
)
5499 && ! HONOR_NANS (mode
)
5500 && ! HONOR_SIGNED_ZEROS (mode
)
5501 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5502 && rtx_equal_p (XEXP (op0
, 1), op2
))
5503 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5504 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5507 /* Convert a == b ? a : b into "b". */
5508 if (GET_CODE (op0
) == EQ
5509 && ! side_effects_p (op0
)
5510 && ! HONOR_NANS (mode
)
5511 && ! HONOR_SIGNED_ZEROS (mode
)
5512 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5513 && rtx_equal_p (XEXP (op0
, 1), op2
))
5514 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5515 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5518 /* Convert (!c) != {0,...,0} ? a : b into
5519 c != {0,...,0} ? b : a for vector modes. */
5520 if (VECTOR_MODE_P (GET_MODE (op1
))
5521 && GET_CODE (op0
) == NE
5522 && GET_CODE (XEXP (op0
, 0)) == NOT
5523 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
5525 rtx cv
= XEXP (op0
, 1);
5526 int nunits
= CONST_VECTOR_NUNITS (cv
);
5528 for (int i
= 0; i
< nunits
; ++i
)
5529 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
5536 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
5537 XEXP (XEXP (op0
, 0), 0),
5539 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
/* Convert x == 0 ? N : clz (x) into clz (x) when
   CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
   Similarly for ctz (x).  */
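/* For instance, on a target whose CLZ_DEFINED_VALUE_AT_ZERO yields,
   say, 32 for SImode, (if_then_else (eq x (const_int 0))
   (const_int 32) (clz:SI x)) can be reduced to just (clz:SI x).  */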
5547 if (COMPARISON_P (op0
) && !side_effects_p (op0
)
5548 && XEXP (op0
, 1) == const0_rtx
)
5551 = simplify_cond_clz_ctz (XEXP (op0
, 0), GET_CODE (op0
),
5557 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5559 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5560 ? GET_MODE (XEXP (op0
, 1))
5561 : GET_MODE (XEXP (op0
, 0)));
5564 /* Look for happy constants in op1 and op2. */
5565 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5567 HOST_WIDE_INT t
= INTVAL (op1
);
5568 HOST_WIDE_INT f
= INTVAL (op2
);
5570 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5571 code
= GET_CODE (op0
);
5572 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5575 tmp
= reversed_comparison_code (op0
, NULL
);
5583 return simplify_gen_relational (code
, mode
, cmp_mode
,
5584 XEXP (op0
, 0), XEXP (op0
, 1));
5587 if (cmp_mode
== VOIDmode
)
5588 cmp_mode
= op0_mode
;
5589 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5590 cmp_mode
, XEXP (op0
, 0),
5593 /* See if any simplifications were possible. */
5596 if (CONST_INT_P (temp
))
5597 return temp
== const0_rtx
? op2
: op1
;
5599 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5605 gcc_assert (GET_MODE (op0
) == mode
);
5606 gcc_assert (GET_MODE (op1
) == mode
);
5607 gcc_assert (VECTOR_MODE_P (mode
));
5608 trueop2
= avoid_constant_pool_reference (op2
);
5609 if (CONST_INT_P (trueop2
))
5611 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
5612 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
5613 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
5614 unsigned HOST_WIDE_INT mask
;
5615 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
5618 mask
= (HOST_WIDE_INT_1U
<< n_elts
) - 1;
5620 if (!(sel
& mask
) && !side_effects_p (op0
))
5622 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
5625 rtx trueop0
= avoid_constant_pool_reference (op0
);
5626 rtx trueop1
= avoid_constant_pool_reference (op1
);
5627 if (GET_CODE (trueop0
) == CONST_VECTOR
5628 && GET_CODE (trueop1
) == CONST_VECTOR
)
5630 rtvec v
= rtvec_alloc (n_elts
);
5633 for (i
= 0; i
< n_elts
; i
++)
5634 RTVEC_ELT (v
, i
) = ((sel
& (HOST_WIDE_INT_1U
<< i
))
5635 ? CONST_VECTOR_ELT (trueop0
, i
)
5636 : CONST_VECTOR_ELT (trueop1
, i
));
5637 return gen_rtx_CONST_VECTOR (mode
, v
);
/* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
   if no element from a appears in the result.  */
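/* For example, with V4SImode operands, m == 3 and n == 12: the outer
   merge keeps only elements 2 and 3 of the inner merge, and those come
   from b, so (vec_merge (vec_merge a b (const_int 3)) c (const_int 12))
   can become (vec_merge b c (const_int 12)).  */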
5642 if (GET_CODE (op0
) == VEC_MERGE
)
5644 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
5645 if (CONST_INT_P (tem
))
5647 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
5648 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
5649 return simplify_gen_ternary (code
, mode
, mode
,
5650 XEXP (op0
, 1), op1
, op2
);
5651 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
5652 return simplify_gen_ternary (code
, mode
, mode
,
5653 XEXP (op0
, 0), op1
, op2
);
5656 if (GET_CODE (op1
) == VEC_MERGE
)
5658 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
5659 if (CONST_INT_P (tem
))
5661 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
5662 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
5663 return simplify_gen_ternary (code
, mode
, mode
,
5664 op0
, XEXP (op1
, 1), op2
);
5665 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
5666 return simplify_gen_ternary (code
, mode
, mode
,
5667 op0
, XEXP (op1
, 0), op2
);
5671 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5673 if (GET_CODE (op0
) == VEC_DUPLICATE
5674 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
5675 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
5676 && mode_nunits
[GET_MODE (XEXP (op0
, 0))] == 1)
5678 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
5679 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
5681 if (XEXP (XEXP (op0
, 0), 0) == op1
5682 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
5688 if (rtx_equal_p (op0
, op1
)
5689 && !side_effects_p (op2
) && !side_effects_p (op1
))
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
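/* For example, on a little-endian target a subreg of the DImode
   constant 0x1122334455667788 taken in SImode at byte 4 unpacks the
   eight value bytes, selects bytes 4..7 and repacks them, giving
   (const_int 0x11223344).  */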
5710 simplify_immed_subreg (machine_mode outermode
, rtx op
,
5711 machine_mode innermode
, unsigned int byte
)
5715 value_mask
= (1 << value_bit
) - 1
5717 unsigned char value
[MAX_BITSIZE_MODE_ANY_MODE
/ value_bit
];
5725 rtx result_s
= NULL
;
5726 rtvec result_v
= NULL
;
5727 enum mode_class outer_class
;
5728 scalar_mode outer_submode
;
5731 /* Some ports misuse CCmode. */
5732 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
5735 /* We have no way to represent a complex constant at the rtl level. */
5736 if (COMPLEX_MODE_P (outermode
))
5739 /* We support any size mode. */
5740 max_bitsize
= MAX (GET_MODE_BITSIZE (outermode
),
5741 GET_MODE_BITSIZE (innermode
));
5743 /* Unpack the value. */
5745 if (GET_CODE (op
) == CONST_VECTOR
)
5747 num_elem
= CONST_VECTOR_NUNITS (op
);
5748 elems
= &CONST_VECTOR_ELT (op
, 0);
5749 elem_bitsize
= GET_MODE_UNIT_BITSIZE (innermode
);
5755 elem_bitsize
= max_bitsize
;
5757 /* If this asserts, it is too complicated; reducing value_bit may help. */
5758 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
5759 /* I don't know how to handle endianness of sub-units. */
5760 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
5762 for (elem
= 0; elem
< num_elem
; elem
++)
5765 rtx el
= elems
[elem
];
5767 /* Vectors are kept in target memory order. (This is probably
5770 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5771 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5773 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5774 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5775 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5776 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5777 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5780 switch (GET_CODE (el
))
5784 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5786 *vp
++ = INTVAL (el
) >> i
;
5787 /* CONST_INTs are always logically sign-extended. */
5788 for (; i
< elem_bitsize
; i
+= value_bit
)
5789 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
5792 case CONST_WIDE_INT
:
5794 rtx_mode_t val
= rtx_mode_t (el
, GET_MODE_INNER (innermode
));
5795 unsigned char extend
= wi::sign_mask (val
);
5796 int prec
= wi::get_precision (val
);
5798 for (i
= 0; i
< prec
&& i
< elem_bitsize
; i
+= value_bit
)
5799 *vp
++ = wi::extract_uhwi (val
, i
, value_bit
);
5800 for (; i
< elem_bitsize
; i
+= value_bit
)
5806 if (TARGET_SUPPORTS_WIDE_INT
== 0 && GET_MODE (el
) == VOIDmode
)
5808 unsigned char extend
= 0;
5809 /* If this triggers, someone should have generated a
5810 CONST_INT instead. */
5811 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
5813 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5814 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
5815 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
5818 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
5822 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
5824 for (; i
< elem_bitsize
; i
+= value_bit
)
5829 /* This is big enough for anything on the platform. */
5830 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
5831 scalar_float_mode el_mode
;
5833 el_mode
= as_a
<scalar_float_mode
> (GET_MODE (el
));
5834 int bitsize
= GET_MODE_BITSIZE (el_mode
);
5836 gcc_assert (bitsize
<= elem_bitsize
);
5837 gcc_assert (bitsize
% value_bit
== 0);
5839 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
5842 /* real_to_target produces its result in words affected by
5843 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5844 and use WORDS_BIG_ENDIAN instead; see the documentation
5845 of SUBREG in rtl.texi. */
5846 for (i
= 0; i
< bitsize
; i
+= value_bit
)
5849 if (WORDS_BIG_ENDIAN
)
5850 ibase
= bitsize
- 1 - i
;
5853 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
5856 /* It shouldn't matter what's done here, so fill it with
5858 for (; i
< elem_bitsize
; i
+= value_bit
)
5864 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5866 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5867 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5871 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5872 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5873 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
5875 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
5876 >> (i
- HOST_BITS_PER_WIDE_INT
);
5877 for (; i
< elem_bitsize
; i
+= value_bit
)
5887 /* Now, pick the right byte to start with. */
5888 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5889 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5890 will already have offset 0. */
5891 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
5893 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
5895 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5896 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5897 byte
= (subword_byte
% UNITS_PER_WORD
5898 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5901 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5902 so if it's become negative it will instead be very large.) */
5903 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5905 /* Convert from bytes to chunks of size value_bit. */
5906 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
5908 /* Re-pack the value. */
5909 num_elem
= GET_MODE_NUNITS (outermode
);
5911 if (VECTOR_MODE_P (outermode
))
5913 result_v
= rtvec_alloc (num_elem
);
5914 elems
= &RTVEC_ELT (result_v
, 0);
5919 outer_submode
= GET_MODE_INNER (outermode
);
5920 outer_class
= GET_MODE_CLASS (outer_submode
);
5921 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
5923 gcc_assert (elem_bitsize
% value_bit
== 0);
5924 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
5926 for (elem
= 0; elem
< num_elem
; elem
++)
5930 /* Vectors are stored in target memory order. (This is probably
5933 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5934 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5936 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5937 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5938 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5939 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5940 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5943 switch (outer_class
)
5946 case MODE_PARTIAL_INT
:
5951 = (GET_MODE_BITSIZE (outer_submode
) + HOST_BITS_PER_WIDE_INT
- 1)
5952 / HOST_BITS_PER_WIDE_INT
;
5953 HOST_WIDE_INT tmp
[MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_WIDE_INT
];
5956 if (GET_MODE_PRECISION (outer_submode
) > MAX_BITSIZE_MODE_ANY_INT
)
5958 for (u
= 0; u
< units
; u
++)
5960 unsigned HOST_WIDE_INT buf
= 0;
5962 i
< HOST_BITS_PER_WIDE_INT
&& base
+ i
< elem_bitsize
;
5964 buf
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5967 base
+= HOST_BITS_PER_WIDE_INT
;
5969 r
= wide_int::from_array (tmp
, units
,
5970 GET_MODE_PRECISION (outer_submode
));
5971 #if TARGET_SUPPORTS_WIDE_INT == 0
5972 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5973 if (wi::min_precision (r
, SIGNED
) > HOST_BITS_PER_DOUBLE_INT
)
5976 elems
[elem
] = immed_wide_int_const (r
, outer_submode
);
5981 case MODE_DECIMAL_FLOAT
:
5984 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32] = { 0 };
5986 /* real_from_target wants its input in words affected by
5987 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5988 and use WORDS_BIG_ENDIAN instead; see the documentation
5989 of SUBREG in rtl.texi. */
5990 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5993 if (WORDS_BIG_ENDIAN
)
5994 ibase
= elem_bitsize
- 1 - i
;
5997 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
6000 real_from_target (&r
, tmp
, outer_submode
);
6001 elems
[elem
] = const_double_from_real_value (r
, outer_submode
);
6013 f
.mode
= outer_submode
;
6016 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
6018 f
.data
.low
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
6019 for (; i
< elem_bitsize
; i
+= value_bit
)
6020 f
.data
.high
|= ((unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
6021 << (i
- HOST_BITS_PER_WIDE_INT
));
6023 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
6031 if (VECTOR_MODE_P (outermode
))
6032 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
6037 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6038 Return 0 if no simplifications are possible. */
6040 simplify_subreg (machine_mode outermode
, rtx op
,
6041 machine_mode innermode
, unsigned int byte
)
6043 /* Little bit of sanity checking. */
6044 gcc_assert (innermode
!= VOIDmode
);
6045 gcc_assert (outermode
!= VOIDmode
);
6046 gcc_assert (innermode
!= BLKmode
);
6047 gcc_assert (outermode
!= BLKmode
);
6049 gcc_assert (GET_MODE (op
) == innermode
6050 || GET_MODE (op
) == VOIDmode
);
6052 if ((byte
% GET_MODE_SIZE (outermode
)) != 0)
6055 if (byte
>= GET_MODE_SIZE (innermode
))
6058 if (outermode
== innermode
&& !byte
)
6061 if (CONST_SCALAR_INT_P (op
)
6062 || CONST_DOUBLE_AS_FLOAT_P (op
)
6063 || GET_CODE (op
) == CONST_FIXED
6064 || GET_CODE (op
) == CONST_VECTOR
)
6065 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
/* Changing mode twice with SUBREG => just change it once,
   or not at all if changing back to the starting mode.  */
6069 if (GET_CODE (op
) == SUBREG
)
6071 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
6074 if (outermode
== innermostmode
6075 && byte
== 0 && SUBREG_BYTE (op
) == 0)
6076 return SUBREG_REG (op
);
6078 /* Work out the memory offset of the final OUTERMODE value relative
6079 to the inner value of OP. */
6080 HOST_WIDE_INT mem_offset
= subreg_memory_offset (outermode
,
6082 HOST_WIDE_INT op_mem_offset
= subreg_memory_offset (op
);
6083 HOST_WIDE_INT final_offset
= mem_offset
+ op_mem_offset
;
6085 /* See whether resulting subreg will be paradoxical. */
6086 if (!paradoxical_subreg_p (outermode
, innermostmode
))
6088 /* In nonparadoxical subregs we can't handle negative offsets. */
6089 if (final_offset
< 0)
6091 /* Bail out in case resulting subreg would be incorrect. */
6092 if (final_offset
% GET_MODE_SIZE (outermode
)
6093 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
6098 HOST_WIDE_INT required_offset
6099 = subreg_memory_offset (outermode
, innermostmode
, 0);
6100 if (final_offset
!= required_offset
)
6102 /* Paradoxical subregs always have byte offset 0. */
6106 /* Recurse for further possible simplifications. */
6107 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
6111 if (validate_subreg (outermode
, innermostmode
,
6112 SUBREG_REG (op
), final_offset
))
6114 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
6115 if (SUBREG_PROMOTED_VAR_P (op
)
6116 && SUBREG_PROMOTED_SIGN (op
) >= 0
6117 && GET_MODE_CLASS (outermode
) == MODE_INT
6118 && IN_RANGE (GET_MODE_SIZE (outermode
),
6119 GET_MODE_SIZE (innermode
),
6120 GET_MODE_SIZE (innermostmode
))
6121 && subreg_lowpart_p (newx
))
6123 SUBREG_PROMOTED_VAR_P (newx
) = 1;
6124 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
6131 /* SUBREG of a hard register => just change the register number
6132 and/or mode. If the hard register is not valid in that mode,
6133 suppress this simplification. If the hard register is the stack,
6134 frame, or argument pointer, leave this as a SUBREG. */
6136 if (REG_P (op
) && HARD_REGISTER_P (op
))
6138 unsigned int regno
, final_regno
;
6141 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
6142 if (HARD_REGISTER_NUM_P (final_regno
))
6144 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
,
6145 subreg_memory_offset (outermode
,
/* Propagate original regno.  We don't have any way to specify
   the offset inside original regno, so do so only for lowpart.
   The information is used only by alias analysis, which cannot
   grok partial registers anyway.  */
6153 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
6154 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
6159 /* If we have a SUBREG of a register that we are replacing and we are
6160 replacing it with a MEM, make a new MEM and try replacing the
6161 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6162 or if we would be widening it. */
6165 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
6166 /* Allow splitting of volatile memory references in case we don't
6167 have instruction to move the whole thing. */
6168 && (! MEM_VOLATILE_P (op
)
6169 || ! have_insn_for (SET
, innermode
))
6170 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
6171 return adjust_address_nv (op
, outermode
, byte
);
  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      part_size = GET_MODE_SIZE (part_mode);
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
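  /* For instance, assuming SFmode is 4 bytes wide,
     (subreg:SF (concat:SC (reg:SF 100) (reg:SF 101)) 4) selects the
     second part and folds to (reg:SF 101).  (Illustrative register
     numbers only.)  */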
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
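  /* For instance, on a little-endian target with 32-bit SImode,
     (subreg:SI (zero_extend:DI (reg:SI 100)) 4) reads only the upper half
     of the zero extension, which is known to be zero, so it folds to
     (const_int 0).  (Illustrative modes and register number only.)  */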
  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && (GET_MODE_PRECISION (int_outermode)
	  < GET_MODE_PRECISION (int_innermode))
      && byte == subreg_lowpart_offset (int_outermode, int_innermode))
    {
      rtx tem = simplify_truncation (int_outermode, op, int_innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
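/* For instance, a caller extracting the low word of a DImode value might
   use simplify_gen_subreg (SImode, x, DImode, 0): if X folds (e.g. it is
   a constant) the simplified piece is returned, otherwise the result is
   (subreg:SI x 0) when such a subreg is valid, or NULL_RTX.  (Illustrative
   modes and offset; on big-endian targets the low word sits at a nonzero
   byte offset.)  */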
/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
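/* For instance, lowpart_subreg (SImode, x, DImode) is equivalent to
   simplify_gen_subreg (SImode, x, DImode, 0) on a little-endian target,
   and to a byte offset of 4 on a 32-bit big-endian one;
   subreg_lowpart_offset supplies the target-dependent offset.  */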
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))