/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
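
/* Illustrative note (not part of the original sources): for a pair
   holding the two-word value -1, LOW is all ones, so
   HWI_SIGN_EXTEND (low) yields HOST_WIDE_INT_M1 for the high word;
   for the value 1 it yields HOST_WIDE_INT_0.  */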
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
                                           mode);
  return gen_int_mode (val, mode);
}
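
/* Worked example (illustrative): -UINTVAL (i) wraps in host arithmetic,
   so VAL == UINTVAL (i) when I is zero or the minimum HOST_WIDE_INT.
   In a mode wider than HOST_BITS_PER_WIDE_INT the true negation of the
   minimum value needs more than one host word, which gen_int_mode
   cannot express; hence the fallback to the full-precision NEG above.  */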
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
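
/* Worked example (illustrative): for a 32-bit integer mode the only
   matching constant has exactly bit 31 set, i.e. VAL == 1 << 31 after
   masking; as a sign-extended CONST_INT that is (const_int -2147483648).  */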
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
        return c;
      else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && VAR_P (decl)
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
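
/* Usage example (illustrative): if X is (plus:SI (reg:SI 1) (reg:SI 2)),
   then simplify_replace_rtx (X, (reg:SI 2), const0_rtx) substitutes the
   register and folds the resulting (plus:SI (reg:SI 1) (const_int 0))
   down to (reg:SI 1).  */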
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (const_int Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
          || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
          /* If doing this transform works for an X with all bits set,
             it works for any X.  */
          && ((GET_MODE_MASK (mode) >> shift) & mask)
             == ((GET_MODE_MASK (op_mode) >> shift) & mask)
          && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
          && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
        {
          mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
          return simplify_gen_binary (AND, mode, op0, mask_op);
        }
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            {
              pos -= op_precision - precision;
              return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                           XEXP (op, 1), GEN_INT (pos));
            }
        }
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                         XEXP (op, 1), XEXP (op, 2));
        }
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
         modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
         and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If comparison is not reversible use
         x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
               || (GET_CODE (false_rtx) == NEG
                   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode, XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x).

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          scalar_int_mode tmode;
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (op, 1) != const0_rtx)
        return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          scalar_int_mode tmode;
          if (int_mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
          && GET_MODE_PRECISION (GET_MODE (op))
             < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
             <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (mode)
             >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (mode)
              == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
                                     GET_MODE (SUBREG_REG (op)));
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    default:
      break;
    }

  return 0;
}
1677 /* Try to compute the value of a unary operation CODE whose output mode is to
1678 be MODE with input operand OP whose mode was originally OP_MODE.
1679 Return zero if the value cannot be computed. */
1681 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1682 rtx op
, machine_mode op_mode
)
1684 unsigned int width
= GET_MODE_PRECISION (mode
);
1686 if (code
== VEC_DUPLICATE
)
1688 gcc_assert (VECTOR_MODE_P (mode
));
1689 if (GET_MODE (op
) != VOIDmode
)
1691 if (!VECTOR_MODE_P (GET_MODE (op
)))
1692 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1694 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1697 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
)
1698 || GET_CODE (op
) == CONST_VECTOR
)
1700 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
1701 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1702 rtvec v
= rtvec_alloc (n_elts
);
1705 if (GET_CODE (op
) != CONST_VECTOR
)
1706 for (i
= 0; i
< n_elts
; i
++)
1707 RTVEC_ELT (v
, i
) = op
;
1710 machine_mode inmode
= GET_MODE (op
);
1711 int in_elt_size
= GET_MODE_UNIT_SIZE (inmode
);
1712 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1714 gcc_assert (in_n_elts
< n_elts
);
1715 gcc_assert ((n_elts
% in_n_elts
) == 0);
1716 for (i
= 0; i
< n_elts
; i
++)
1717 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1719 return gen_rtx_CONST_VECTOR (mode
, v
);
1723 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1725 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
1726 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1727 machine_mode opmode
= GET_MODE (op
);
1728 int op_elt_size
= GET_MODE_UNIT_SIZE (opmode
);
1729 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1730 rtvec v
= rtvec_alloc (n_elts
);
1733 gcc_assert (op_n_elts
== n_elts
);
1734 for (i
= 0; i
< n_elts
; i
++)
1736 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1737 CONST_VECTOR_ELT (op
, i
),
1738 GET_MODE_INNER (opmode
));
1741 RTVEC_ELT (v
, i
) = x
;
1743 return gen_rtx_CONST_VECTOR (mode
, v
);
1746 /* The order of these tests is critical so that, for example, we don't
1747 check the wrong mode (input vs. output) for a conversion operation,
1748 such as FIX. At some point, this should be simplified. */
1750 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1754 if (op_mode
== VOIDmode
)
1756 /* CONST_INT have VOIDmode as the mode. We assume that all
1757 the bits of the constant are significant, though, this is
1758 a dangerous assumption as many times CONST_INTs are
1759 created and used with garbage in the bits outside of the
1760 precision of the implied mode of the const_int. */
1761 op_mode
= MAX_MODE_INT
;
1764 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), SIGNED
);
1766 /* Avoid the folding if flag_signaling_nans is on and
1767 operand is a signaling NaN. */
1768 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1771 d
= real_value_truncate (mode
, d
);
1772 return const_double_from_real_value (d
, mode
);
1774 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1778 if (op_mode
== VOIDmode
)
1780 /* CONST_INT have VOIDmode as the mode. We assume that all
1781 the bits of the constant are significant, though, this is
1782 a dangerous assumption as many times CONST_INTs are
1783 created and used with garbage in the bits outside of the
1784 precision of the implied mode of the const_int. */
1785 op_mode
= MAX_MODE_INT
;
1788 real_from_integer (&d
, mode
, rtx_mode_t (op
, op_mode
), UNSIGNED
);
1790 /* Avoid the folding if flag_signaling_nans is on and
1791 operand is a signaling NaN. */
1792 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1795 d
= real_value_truncate (mode
, d
);
1796 return const_double_from_real_value (d
, mode
);
1799 if (CONST_SCALAR_INT_P (op
) && width
> 0)
1802 machine_mode imode
= op_mode
== VOIDmode
? mode
: op_mode
;
1803 rtx_mode_t op0
= rtx_mode_t (op
, imode
);
1806 #if TARGET_SUPPORTS_WIDE_INT == 0
1807 /* This assert keeps the simplification from producing a result
1808 that cannot be represented in a CONST_DOUBLE but a lot of
1809 upstream callers expect that this function never fails to
1810 simplify something and so you if you added this to the test
1811 above the code would die later anyway. If this assert
1812 happens, you just need to make the port support wide int. */
1813 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1819 result
= wi::bit_not (op0
);
1823 result
= wi::neg (op0
);
1827 result
= wi::abs (op0
);
1831 result
= wi::shwi (wi::ffs (op0
), mode
);
1835 if (wi::ne_p (op0
, 0))
1836 int_value
= wi::clz (op0
);
1837 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, int_value
))
1838 int_value
= GET_MODE_PRECISION (mode
);
1839 result
= wi::shwi (int_value
, mode
);
1843 result
= wi::shwi (wi::clrsb (op0
), mode
);
1847 if (wi::ne_p (op0
, 0))
1848 int_value
= wi::ctz (op0
);
1849 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, int_value
))
1850 int_value
= GET_MODE_PRECISION (mode
);
1851 result
= wi::shwi (int_value
, mode
);
1855 result
= wi::shwi (wi::popcount (op0
), mode
);
1859 result
= wi::shwi (wi::parity (op0
), mode
);
1863 result
= wide_int (op0
).bswap ();
1868 result
= wide_int::from (op0
, width
, UNSIGNED
);
1872 result
= wide_int::from (op0
, width
, SIGNED
);
1880 return immed_wide_int_const (result
, mode
);
1883 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1884 && SCALAR_FLOAT_MODE_P (mode
)
1885 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1887 REAL_VALUE_TYPE d
= *CONST_DOUBLE_REAL_VALUE (op
);
1893 d
= real_value_abs (&d
);
1896 d
= real_value_negate (&d
);
1898 case FLOAT_TRUNCATE
:
1899 /* Don't perform the operation if flag_signaling_nans is on
1900 and the operand is a signaling NaN. */
1901 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1903 d
= real_value_truncate (mode
, d
);
1906 /* Don't perform the operation if flag_signaling_nans is on
1907 and the operand is a signaling NaN. */
1908 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1910 /* All this does is change the mode, unless changing
1912 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1913 real_convert (&d
, mode
, &d
);
1916 /* Don't perform the operation if flag_signaling_nans is on
1917 and the operand is a signaling NaN. */
1918 if (HONOR_SNANS (mode
) && REAL_VALUE_ISSIGNALING_NAN (d
))
1920 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1927 real_to_target (tmp
, &d
, GET_MODE (op
));
1928 for (i
= 0; i
< 4; i
++)
1930 real_from_target (&d
, tmp
, mode
);
1936 return const_double_from_real_value (d
, mode
);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);
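	  /* For example, a DFmode constant 1.0e10 narrowed by FIX to SImode
	     saturates to the upper bound 0x7fffffff, while UNSIGNED_FIX
	     folds a NaN or a negative input to (const_int 0).  */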
	default:
	  gcc_unreachable ();
	}
    }

  return 0;
}

/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }
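  /* As an example of the transforms above, with SImode operands,
     (and (bswap x) (const_int 0xff)) becomes
     (bswap (and x (const_int 0xff000000))): the constant is byte-swapped
     once so that the outer BSWAP can later cancel or combine.  */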
  return NULL_RTX;
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return (set_src_cost (tem, mode, speed)
		      <= set_src_cost (orig, mode, speed) ? tem : 0);
	    }
	}
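      /* For example, (plus (mult x 3) x) becomes (mult x 4) and
	 (plus (ashift x 2) x) becomes (mult x 5), but only when the
	 new form is no more expensive than the original.  */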
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));
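      /* This is valid because adding the sign bit modulo 2**precision
	 flips exactly that bit, which is the same as XORing it in.  */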
      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
	    return xop00;

	  if (REG_P (xop00) && REG_P (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE (xop00) == mode
	      && GET_MODE (xop10) == mode
	      && GET_MODE_CLASS (mode) == MODE_CC)
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
	 constants, in which case not retaining additions and
	 subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
	  && !contains_symbolic_reference_p (op1))
	return simplify_gen_unary (NOT, mode, op1, mode);
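      /* This is the two's complement identity ~a == -1 - a: both values
	 have the same bit pattern for any a.  */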
      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return (set_src_cost (tem, mode, speed)
		      <= set_src_cost (orig, mode, speed) ? tem : 0);
	    }
	}
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));
      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
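      /* The subtraction above cannot borrow: every bit set in (x & y) is
	 also set in x, so x - (x & y) merely clears the bits x shares
	 with y, which is exactly x & ~y.  */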
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}
      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signaling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
	}
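      /* For example, (mult x (const_int 8)) becomes
	 (ashift x (const_int 3)); wi::exact_log2 rejects constants that
	 are not a power of two, so nothing else is rewritten here.  */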
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	  if (real_equal (d1, &dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && real_equal (d1, &dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;

      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
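      /* As an example of the last rule, with C1 == 0x0f0f and
	 C2 == 0x00ff, (ior (and x 0x0f0f) 0x00ff) becomes
	 (ior (and x 0x0f00) 0x00ff): bits that C2 forces to 1 are
	 dropped from C1.  */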
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
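      /* For example, in SImode, (ior (ashift x 8) (lshiftrt x 24))
	 has CX + CY == 32 and becomes (rotate x 8).  */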
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));
      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	{
	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (UINTVAL (XEXP (op0, 1))
						       & ~UINTVAL (op1),
						       mode));
	  return simplify_gen_binary (IOR, mode, tmp, op1);
	}

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
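      /* This is valid because when no bit position can be set in both
	 operands, XOR, IOR and PLUS all compute the same result: there
	 is nothing to cancel and no carry can occur.  */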
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);
      /* Given (xor (ior (xor A B) C) D), where B, C and D are
	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
	 out bits inverted twice and not set by C.  Similarly, given
	 (xor (and (xor A B) C) D), simplify without inverting C in
	 the xor operand: (xor (and A C) (B&C)^D).  */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (op1)
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
	{
	  enum rtx_code op = GET_CODE (op0);
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx d = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);
	  HOST_WIDE_INT dval = INTVAL (d);
	  HOST_WIDE_INT xcval;

	  if (op == IOR)
	    xcval = ~cval;
	  else
	    xcval = cval;

	  return simplify_gen_binary (XOR, mode,
				      simplify_gen_binary (op, mode, a, c),
				      gen_int_mode ((bval & xcval) ^ dval,
						    mode));
	}
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | A&B&~C	* DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)	* Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  /* Instead of computing ~A&C, we compute its negated value,
	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
						  GEN_INT (~cval));
	  if ((~cval & bval) == 0)
	    {
	      rtx na_c = NULL_RTX;
	      if (n_na_c)
		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
	      else
		{
		  /* If ~A does not simplify, don't bother: we don't
		     want to simplify 2 operations into 3, and if na_c
		     were to simplify with na, n_na_c would have
		     simplified as well.  */
		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
		  if (na)
		    na_c = simplify_gen_binary (AND, mode, na, c);
		}

	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval, mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (n_na_c == CONSTM1_RTX (mode))
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}
      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
	 do (ior (and A ~C) (and B C)) which is a machine instruction on some
	 machines, and also has shorter instruction path length.  */
      if (GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && CONST_INT_P (XEXP (op0, 1))
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
	{
	  rtx a = trueop1;
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
	  rtx bc = simplify_gen_binary (AND, mode, b, c);
	  return simplify_gen_binary (IOR, mode, a_nc, bc);
	}
      /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C))  */
      else if (GET_CODE (op0) == AND
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (XEXP (op0, 1))
	       && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
	{
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = trueop1;
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
	  rtx ac = simplify_gen_binary (AND, mode, a, c);
	  return simplify_gen_binary (IOR, mode, ac, b_nc);
	}
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
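      /* For example, with M == 0xff, ((a & 0x1ff) + b) & 0xff becomes
	 (a + b) & 0xff: the inner AND keeps every bit that survives the
	 outer mask, so removing it cannot change the masked sum.  */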
      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 1)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 1)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode)
	  && !cfun->can_throw_non_call_exceptions)
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
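      /* For example, (udiv x (const_int 8)) becomes
	 (lshiftrt x (const_int 3)).  This holds only for unsigned
	 division; signed division by a power of two must round towards
	 zero and cannot be done with a plain shift.  */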
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	      /* x/-1.0 is -x.  */
	      if (real_equal (d1, &dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !real_equal (d1, &dconst0))
		{
		  REAL_VALUE_TYPE d;
		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
		  tem = const_double_from_real_value (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
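      /* For example, with -freciprocal-math, x / 2.0 becomes x * 0.5.
	 The reciprocal 1/C is itself rounded here, so the product can
	 differ from the exact division; that is why the flag is
	 required.  */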
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (tem)
		return tem;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (x)
		return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
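      /* For example, (umod x (const_int 16)) becomes
	 (and x (const_int 15)): for a power-of-two modulus the remainder
	 is just the low-order bits.  */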
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;

    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_PRECISION (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				    mode, op0,
				    GEN_INT (GET_MODE_PRECISION (mode)
					     - INTVAL (trueop1)));
#endif
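      /* For example, in SImode, (rotate x 24) becomes (rotatert x 8):
	 a rotate by more than half the precision is rewritten as the
	 opposite rotate by the complementary amount.  */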
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;
    canonicalize_shift:
      /* Given:
	 scalar modes M1, M2
	 scalar constants c1, c2
	 size (M2) > size (M1)
	 c1 == size (M2) - size (M1)
	 optimize:
	 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
			     <low_part>)
		      (const_int <c2>))
	 to:
	 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
	    <low_part>).  */
      if ((code == ASHIFTRT || code == LSHIFTRT)
	  && !VECTOR_MODE_P (mode)
	  && SUBREG_P (op0)
	  && CONST_INT_P (op1)
	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
	  && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
	      > GET_MODE_BITSIZE (mode))
	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
	      == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
		  - GET_MODE_BITSIZE (mode)))
	  && subreg_lowpart_p (op0))
	{
	  rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
			     + INTVAL (op1));
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	  tmp = simplify_gen_binary (code,
				     GET_MODE (SUBREG_REG (op0)),
				     XEXP (SUBREG_REG (op0), 0),
				     tmp);
	  return lowpart_subreg (mode, tmp, inner_mode);
	}
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));
	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract scalar element from a vector using chain of
	     nested VEC_SELECT expressions.  When input operand is a memory
	     operand, this operation can be simplified to a simple scalar
	     load from an offseted memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_UNIT_SIZE (opmode);
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_UNIT_SIZE (mode00);
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_UNIT_SIZE (mode01);
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select correct operand of VEC_CONCAT
		     and adjust selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }

	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_UNIT_SIZE (mode);
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	  /* Recognize the identity.  */
	  if (GET_MODE (trueop0) == mode)
	    {
	      bool maybe_ident = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (j) || INTVAL (j) != i)
		    {
		      maybe_ident = false;
		      break;
		    }
		}
	      if (maybe_ident)
		return trueop0;
	    }
	  /* If we build {a,b} then permute it, build the result directly.  */
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 0)) == mode
	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 1)) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 4 && i1 < 4);
	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_MODE (trueop0) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 2 && i1 < 2);
	      subop0 = XEXP (trueop0, i0);
	      subop1 = XEXP (trueop0, i1);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }
	  /* If we select one half of a vec_concat, return that.  */
	  if (GET_CODE (trueop0) == VEC_CONCAT
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
	    {
	      rtx subop0 = XEXP (trueop0, 0);
	      rtx subop1 = XEXP (trueop0, 1);
	      machine_mode mode0 = GET_MODE (subop0);
	      machine_mode mode1 = GET_MODE (subop1);
	      int li = GET_MODE_UNIT_SIZE (mode0);
	      int l0 = GET_MODE_SIZE (mode0) / li;
	      int l1 = GET_MODE_SIZE (mode1) / li;
	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
		{
		  bool success = true;
		  for (int i = 1; i < l0; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop0;
		}
	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
		{
		  bool success = true;
		  for (int i = 1; i < l1; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop1;
		}
	    }
	}
      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size;

	      if (CONST_INT_P (XEXP (vec, 0)))
		{
		  /* vec_concat of two const_ints doesn't make sense with
		     respect to modes.  */
		  if (CONST_INT_P (XEXP (vec, 1)))
		    return 0;

		  vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
			     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
		}
	      else
		vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}
      /* If we select elements in a vec_merge that all come from the same
	 operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
	{
	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
	  if (CONST_INT_P (trueop02))
	    {
	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
	      bool all_operand0 = true;
	      bool all_operand1 = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
		    all_operand1 = false;
		  else
		    all_operand0 = false;
		}
	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
	    }
	}
      /* If we have two nested selects that are inverses of each
	 other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
	  && GET_MODE (XEXP (trueop0, 0)) == mode)
	{
	  rtx op0_subop1 = XEXP (trueop0, 1);
	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

	  /* Apply the outer ordering vector to the inner one.  (The inner
	     ordering vector is expressly permitted to be of a different
	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
	     then the two VEC_SELECTs cancel.  */
	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
	    {
	      rtx x = XVECEXP (trueop1, 0, i);
	      if (!CONST_INT_P (x))
		return 0;
	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
	      if (!CONST_INT_P (y) || i != INTVAL (y))
		return 0;
	    }
	  return XEXP (trueop0, 0);
	}

      return 0;
    case VEC_CONCAT:
      {
	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				 ? GET_MODE (trueop0)
				 : GET_MODE_INNER (mode));
	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				 ? GET_MODE (trueop1)
				 : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_SCALAR_INT_P (trueop0)
	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_SCALAR_INT_P (trueop1)
		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
	  {
	    int elt_size = GET_MODE_UNIT_SIZE (mode);
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
	/* Try to merge two VEC_SELECTs from the same vector into a single one.
	   Restrict the transformation to avoid generating a VEC_SELECT with a
	   mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }

	return 0;
      }

    default:
      break;
    }

  return 0;
}
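/* Sketch of the merge above (illustrative only): concatenating
   (vec_select:SI A (parallel [0])) with (vec_select:SI A (parallel [3])),
   both selecting from the same V4SI vector A whose mode equals the result
   mode, becomes a single (vec_select A (parallel [0 3])); the new PARALLEL
   is simply the two original selection vectors appended end to end.  */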
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned i;
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND || code == IOR || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return const_double_from_real_value (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  const REAL_VALUE_TYPE *opr0, *opr1;
	  bool inexact;

	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
		  || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
	    return 0;

	  real_convert (&f0, mode, opr0);
	  real_convert (&f1, mode, opr1);

	  if (code == DIV
	      && real_equal (&f1, &dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && real_equal (&f0, &dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return const_double_from_real_value (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = rtx_mode_t (op0, mode);
      rtx_mode_t pop1 = rtx_mode_t (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert happens,
	 you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, width);
	    else if (wi::geu_p (wop1, width))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
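/* For instance (illustrative only): with width == 32 and
   SHIFT_COUNT_TRUNCATED defined, a constant shift count of 33 folds as
   if it were 33 % 32 == 1; without SHIFT_COUNT_TRUNCATED, an
   out-of-range count makes the fold bail out, because the result of
   such a shift is undefined at the RTL level.  */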
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
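/* Note on the doubled return value above (explanatory sketch, not from
   the original sources): precedence differences are reported as
   2 * delta, which is always even and nonzero, while the REG-versus-REG
   ordering returns 0 or 1.  A caller can therefore tell the
   "both operands are regs" case apart, since it is the only way the
   function yields exactly 1.  */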
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than testing for specific cases, we do this by a brute-force
   method and do all possible simplifications until no more changes occur.
   Then we rebuild the operation.

   May return NULL_RTX when no changes were made.  */

static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      /* If this operand was negated then we will potentially
		 canonicalize the expression.  Similarly if we don't
		 place the operands adjacent we're re-ordering the
		 expression and thus might be performing a
		 canonicalization.  Ignore register re-ordering.
		 ??? It might be better to shuffle the ops array here,
		 but then (plus (plus (A, B), plus (C, D))) wouldn't
		 be seen as non-canonical.  */
	      if (this_neg
		  || (i != n_ops - 2
		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
		canonicalized = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);
  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
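  /* Example of the shortcut above (illustrative only): for (minus x y)
     the expansion loop leaves ops[0] = x with neg 0 and ops[1] = y with
     neg 1, so code becomes MINUS, lhs = x, rhs = y, and we simply try
     to constant-fold (minus x y) directly instead of running the full
     pairwise-combination machinery.  */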
  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int cmp;

	  j = i - 1;
	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
	  if (cmp <= 0)
	    continue;
	  /* Just swapping registers doesn't count as canonicalization.  */
	  if (cmp != 1)
	    canonicalized = 1;

	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (--j >= 0
		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  /* If nothing changed, check that rematerialization of rtl instructions
     is still required.  */
  if (!canonicalized)
    {
      /* Perform rematerialization only if all operands are registers and
	 all operations are PLUS.  */
      /* ??? Also disallow (non-global, non-frame) fixed registers to work
	 around rs6000 and how it uses the CA register.  See PR67145.  */
      for (i = 0; i < n_ops; i++)
	if (ops[i].neg
	    || !REG_P (ops[i].op)
	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
		&& fixed_regs[REGNO (ops[i].op)]
		&& !global_regs[REGNO (ops[i].op)]
		&& ops[i].op != frame_pointer_rtx
		&& ops[i].op != arg_pointer_rtx
		&& ops[i].op != stack_pointer_rtx))
	  return NULL_RTX;
      goto gen_result;
    }

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      if (CONST_INT_P (value))
	{
	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					     INTVAL (value));
	  n_ops--;
	}
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
 gen_result:
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
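/* End-to-end sketch of the brute-force method (illustrative only):
   simplifying (plus (minus a b) (minus b a)) first flattens the tree to
   the signed multiset { +a, -b, +b, -a }; the pairwise loop then folds
   a against -a and b against -b via simplify_binary_operation, so the
   whole expression collapses without ever pattern-matching the specific
   shape of the input.  */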
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
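/* E.g. (plus x y), (minus x y) and a (const (plus ... ...)) whose two
   inner operands are both constants all qualify; a bare REG or MEM does
   not, so simplify_plus_minus is never even attempted for those.  */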
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return const_double_from_real_value (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done in, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
     transformed into (LTU a -C).  */
  if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
      && CONST_INT_P (XEXP (op0, 1))
      && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational (LTU, mode, cmp_mode,
				      XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));
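  /* A concrete instance of the (LTU/GEU (PLUS a C) C) rule above
     (illustrative only): (ltu (plus a 4) 4), the usual unsigned
     overflow test for "a + 4", becomes (geu a -4), i.e. a single
     comparison of a against the negated constant.  */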
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
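/* Usage sketch (illustrative only): for two known-unequal values with
   KNOWN_RESULT == (CMP_LT | CMP_LTU), comparison_result (LE, ...) sees
   CMP_GT unset and returns const_true_rtx, while
   comparison_result (GTU, ...) sees CMP_GTU unset and returns
   const0_rtx, so one bitmask answers every comparison code at once.  */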
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
      rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
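  /* Worked instance of the bounds test above (illustrative only): if
     nonzero_bits shows only the low three bits of x may be set, the
     reduced range is mmin = 0, mmax = 7; then (gtu x 7) folds to
     const0_rtx and (leu x 7) folds to const_true_rtx without knowing
     the value of x itself.  */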
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & (HOST_WIDE_INT_1U << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
	    return const0_rtx;
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
	    return const_true_rtx;
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the
   expression can be simplified to that or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */

static rtx
simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
{
  if (cmp_code != EQ && cmp_code != NE)
    return NULL_RTX;

  /* Result on X == 0 and X != 0 respectively.  */
  rtx on_zero, on_nonzero;
  if (cmp_code == EQ)
    {
      on_zero = true_val;
      on_nonzero = false_val;
    }
  else
    {
      on_zero = false_val;
      on_nonzero = true_val;
    }

  rtx_code op_code = GET_CODE (on_nonzero);
  if ((op_code != CLZ && op_code != CTZ)
      || !rtx_equal_p (XEXP (on_nonzero, 0), x)
      || !CONST_INT_P (on_zero))
    return NULL_RTX;

  HOST_WIDE_INT op_val;
  if (((op_code == CLZ
	&& CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
       || (op_code == CTZ
	   && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
      && op_val == INTVAL (on_zero))
    return on_nonzero;

  return NULL_RTX;
}
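/* For example (illustrative only): on a target where
   CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode, the RTL for
   "x == 0 ? 32 : clz (x)" passes every check above and the whole
   conditional simplifies to just clz (x).  */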
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & (HOST_WIDE_INT_1U << (op1val - 1))) != 0)
		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      /* Convert (!c) != {0,...,0} ? a : b into
	 c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits = CONST_VECTOR_NUNITS (cv);
	  bool ok = true;
	  for (int i = 0; i < nunits; ++i)
	    if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
	      {
		ok = false;
		break;
	      }
	  if (ok)
	    {
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}
      /* Convert x == 0 ? N : clz (x) into clz (x) when
	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
	 Similarly for ctz (x).  */
      if (COMPARISON_P (op0) && !side_effects_p (op0)
	  && XEXP (op0, 1) == const0_rtx)
	{
	  rtx simplified
	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
				     op1, op2);
	  if (simplified)
	    return simplified;
	}

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
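	  /* Selector sketch (illustrative only): for a 4-element merge,
	     mask is 0xf; sel == 0b0101 takes elements 0 and 2 from op0
	     and elements 1 and 3 from op1, and the constant-vector case
	     above materializes exactly that mixture.  */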
	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	     with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s = NULL;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;
	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = rtx_mode_t (el, innermode);
	    unsigned char extend = wi::sign_mask (val);
	    int prec = wi::get_precision (val);

	    for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      scalar_float_mode el_mode;

	      el_mode = as_a <scalar_float_mode> (GET_MODE (el));
	      int bitsize = GET_MODE_BITSIZE (el_mode);

	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
		/ HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
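/* Byte-level sketch of the unpack/repack scheme above (illustrative
   only): reading the DImode constant 0x0102030405060708 as two SImode
   halves on a little-endian target fills 'value' with the bytes
   08 07 06 05 04 03 02 01; BYTE == 0 then selects the SImode low part
   0x05060708 and BYTE == 4 the high part 0x01020304.  */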
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      part_size = GET_MODE_SIZE (part_mode);
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
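
  /* For example, with 4-byte SFmode parts,
     (subreg:SF (concat:SC RE IM) 4) selects the second part and
     simplifies to IM, while byte offset 0 would yield RE.  */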

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source has.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
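
  /* For example, on a little-endian target,
     (subreg:SI (zero_extend:DI (reg:SI X)) 4) reads only the
     zero-filled upper half and folds to (const_int 0).  */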

  /* A lowpart SUBREG of a narrower scalar integer mode acts as a
     truncation, so try the truncation simplifications.  */
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
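
/* For example, simplify_gen_subreg (QImode, X, SImode, 0) folds to a
   CONST_INT when X is a CONST_INT, and otherwise typically returns
   (subreg:QI X 0) if that subreg is valid, or NULL_RTX if not.  */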

/* Generate a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) in OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
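
/* A minimal usage sketch (illustrative only; the caller and variable
   names here are hypothetical, not part of GCC's API): extract the
   least significant SImode part of a DImode value.  */
#if 0
static rtx
extract_low_si (rtx di_val)
{
  /* On a little-endian target this yields (subreg:SI DI_VAL 0);
     on a big-endian target subreg_lowpart_offset supplies byte 4.  */
  return lowpart_subreg (SImode, di_val, DImode);
}
#endif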

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow pass-dependent state to be provided to these
	   routines and add simplifications based on that state.
	   Remove code from cse.c & combine.c that becomes redundant.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))