/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
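/* For example, if LOW, viewed as a HOST_WIDE_INT, has its sign bit set,
   the macro yields HOST_WIDE_INT_M1 (all ones) for the high half; any
   non-negative LOW yields HOST_WIDE_INT_0.  */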
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
                                           mode);
  return gen_int_mode (val, mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */
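/* For example, in SImode the sign-bit constant is (const_int -2147483648):
   after masking with the 32-bit mode mask its value is 0x80000000, which
   is exactly HOST_WIDE_INT_1U << 31.  */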
bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */
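/* E.g. val_signbit_p (QImode, 0x80) is true, and so is
   val_signbit_p (QImode, 0x180), since VAL is first masked with the
   QImode mode mask 0xff.  */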
bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */
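/* For a commutative CODE such as PLUS, an operand pair like
   ((const_int 4), (reg:SI 60)) is reordered so that the constant comes
   second, giving the canonical (plus (reg:SI 60) (const_int 4)).  */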
rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
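/* On targets that address the constant pool with a HIGH/LO_SUM pair,
   the pool SYMBOL_REF appears as the second operand of the LO_SUM;
   the code below looks through that form as well.  */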
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
        return c;
      else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */
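/* The attributes consulted here are MEM_EXPR and MEM_OFFSET: when they
   identify a static or thread-local variable whose DECL_RTL is itself
   a MEM, the address is rewritten in terms of that variable's RTL.  */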
rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && VAR_P (decl)
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */
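/* simplify_replace_rtx below is the usual entry point: it calls this
   function with FN == NULL and a replacement rtx as DATA, so every
   occurrence of OLD_RTX in X is replaced by a copy of that rtx.  */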
rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
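/* In simplify_truncation below, PRECISION and OP_PRECISION are the unit
   precisions of MODE and OP_MODE; the gcc_assert on entry checks that
   PRECISION <= OP_PRECISION, i.e. that this really is a truncation.  */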
static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
          || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
          /* If doing this transform works for an X with all bits set,
             it works for any X.  */
          && ((GET_MODE_MASK (mode) >> shift) & mask)
             == ((GET_MODE_MASK (op_mode) >> shift) & mask)
          && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
          && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
        {
          mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
          return simplify_gen_binary (AND, mode, op0, mask_op);
        }
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            {
              pos -= op_precision - precision;
              return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                           XEXP (op, 1), GEN_INT (pos));
            }
        }
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                         XEXP (op, 1), XEXP (op, 2));
        }
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */
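/* For example, SFmode has a 24-bit significand, so a conversion from
   SImode is exact whenever the operand is known to need at most 24
   significant bits once trailing zero bits are discounted.  */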
static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
         modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
         and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);
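      /* A lowpart SUBREG of (ashift 1 X) computed in a wider mode can
         likewise absorb the NOT, by rotating ~1 in the wider mode and
         taking the lowpart of the result.  */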
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If comparison is not reversible use
         x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
               || (GET_CODE (false_rtx) == NEG
                   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                            0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
         */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);
      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematiclly
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }
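      /* For instance, a 16-bit by 16-bit widening multiply whose 32-bit
         result is sign-extended to 64 bits becomes a single widening
         multiply producing all 64 bits directly; BITS <= 32 above
         guarantees that the product cannot overflow.  */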
      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (op, 1) != const0_rtx)
        return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematiclly
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
          && GET_MODE_PRECISION (GET_MODE (op))
             < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
             <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (mode)
             >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (mode)
              == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
                                     GET_MODE (SUBREG_REG (op)));
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */

rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
                                rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_UNIT_SIZE (mode);
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_UNIT_SIZE (mode);
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
        return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
        return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE but a lot of
         upstream callers expect that this function never fails to
         simplify something and so you if you added this to the test
         above the code would die later anyway.  If this assert
         happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
        {
        case NOT:
          result = wi::bit_not (op0);
          break;

        case NEG:
          result = wi::neg (op0);
          break;

        case ABS:
          result = wi::abs (op0);
          break;

        case FFS:
          result = wi::shwi (wi::ffs (op0), mode);
          break;

        case CLZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::clz (op0);
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case CLRSB:
          result = wi::shwi (wi::clrsb (op0), mode);
          break;

        case CTZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::ctz (op0);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case POPCOUNT:
          result = wi::shwi (wi::popcount (op0), mode);
          break;

        case PARITY:
          result = wi::shwi (wi::parity (op0), mode);
          break;

        case BSWAP:
          result = wide_int (op0).bswap ();
          break;

        case TRUNCATE:
        case ZERO_EXTEND:
          result = wide_int::from (op0, width, UNSIGNED);
          break;

        case SIGN_EXTEND:
          result = wide_int::from (op0, width, SIGNED);
          break;

        case SQRT:
        default:
          return 0;
        }

      return immed_wide_int_const (result, mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
      switch (code)
        {
        case SQRT:
          return 0;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
            return NULL_RTX;
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
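      /* Concretely, the folding below saturates: values above the largest
         representable integer fold to that maximum, values below the
         smallest fold to that minimum, and NaNs fold to zero.  */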
1953 const REAL_VALUE_TYPE
*x
= CONST_DOUBLE_REAL_VALUE (op
);
1954 wide_int wmax
, wmin
;
1955 /* This is part of the abi to real_to_integer, but we check
1956 things before making this call. */
1962 if (REAL_VALUE_ISNAN (*x
))
1965 /* Test against the signed upper bound. */
1966 wmax
= wi::max_value (width
, SIGNED
);
1967 real_from_integer (&t
, VOIDmode
, wmax
, SIGNED
);
1968 if (real_less (&t
, x
))
1969 return immed_wide_int_const (wmax
, mode
);
1971 /* Test against the signed lower bound. */
1972 wmin
= wi::min_value (width
, SIGNED
);
1973 real_from_integer (&t
, VOIDmode
, wmin
, SIGNED
);
1974 if (real_less (x
, &t
))
1975 return immed_wide_int_const (wmin
, mode
);
1977 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
1981 if (REAL_VALUE_ISNAN (*x
) || REAL_VALUE_NEGATIVE (*x
))
1984 /* Test against the unsigned upper bound. */
1985 wmax
= wi::max_value (width
, UNSIGNED
);
1986 real_from_integer (&t
, VOIDmode
, wmax
, UNSIGNED
);
1987 if (real_less (&t
, x
))
1988 return immed_wide_int_const (wmax
, mode
);
1990 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
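/* For example, (fix:SI (const_double:DF 1e10)) folds above to
   (const_int 0x7fffffff): 1e10 exceeds the SImode signed upper bound, so
   the result saturates to wi::max_value (32, SIGNED), matching the
   middle-end's constant-folding semantics.  */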
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
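/* For example, (and:SI (bswap:SI x) (const_int 0xff)) becomes
   (bswap:SI (and:SI x (const_int 0xff000000))): the constant mask is
   byte-swapped so the AND can combine with whatever feeds X, and a
   single BSWAP is re-applied on top.  */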
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
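/* For example, (plus:SI (plus:SI x (const_int 4)) y) is canonicalized
   above to (plus:SI (plus:SI x y) (const_int 4)), moving the constant
   outermost where later folds can see it.  */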
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
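/* Example use (hypothetical caller):
     simplify_binary_operation (PLUS, SImode,
                                gen_int_mode (3, SImode),
                                gen_int_mode (4, SImode))
   folds to (const_int 7) through simplify_const_binary_operation, while
   an operand pair with no applicable rule returns 0.  */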
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
          wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                            GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + coeff1, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return (set_src_cost (tem, mode, speed)
                      <= set_src_cost (orig, mode, speed) ? tem : 0);
            }
        }
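      /* For example, (plus:SI (mult:SI x (const_int 7)) x) has
         coeff0 == 7 and coeff1 == 1 with lhs == rhs == x, so it becomes
         (mult:SI x (const_int 8)), kept only if set_src_cost does not
         judge the new form more expensive than the original PLUS.  */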
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == XOR
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
            return xop00;

          if (REG_P (xop00) && REG_P (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE (xop00) == mode
              && GET_MODE (xop10) == mode
              && GET_MODE_CLASS (mode) == MODE_CC)
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
         constants, in which case not retaining additions and
         subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
          && !contains_symbolic_reference_p (op1))
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
          wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
            {
              negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                               GET_MODE_PRECISION (mode));
              negcoeff1 = -negcoeff1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return (set_src_cost (tem, mode, speed)
                      <= set_src_cost (orig, mode, speed) ? tem : 0);
            }
        }
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem != 0)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
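    /* The (x - (x & y)) rule above is the identity x - (x & y) == x & ~y;
       e.g. with x == 0b1100 and y == 0b1010, both sides evaluate to
       0b0100.  */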
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
        {
          val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
          if (val >= 0)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
        }

      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

          if (real_equal (d1, &dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && real_equal (d1, &dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
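    /* For example, (mult:SI x (const_int 8)) becomes
       (ashift:SI x (const_int 3)) via the power-of-two rule above, since
       wi::exact_log2 (8) == 3.  */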
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode)
          && trueop1 == CONSTM1_RTX (mode)
          && !side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
          && !side_effects_p (op0))
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */
      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and X C1) C2), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (op1)
          && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
        {
          rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (UINTVAL (XEXP (op0, 1))
                                                       & ~UINTVAL (op1),
                                                       mode));
          return simplify_gen_binary (IOR, mode, tmp, op1);
        }

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = UINTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && trunc_int_for_mode (mask, mode) == mask
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (mode, XEXP (op0, 0),
                                                       mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
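    /* For example, in SImode (ior:SI (ashift:SI x (const_int 24))
       (lshiftrt:SI x (const_int 8))) is recognized above as
       (rotate:SI x (const_int 24)), because 24 + 8 equals the mode
       precision.  */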
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == PLUS
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
         constants, simplify to (xor (ior A C) (B&~C)^D), canceling
         out bits inverted twice and not set by C.  Similarly, given
         (xor (and (xor A B) C) D), simplify without inverting C in
         the xor operand: (xor (and A C) (B&C)^D).
         */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
               && GET_CODE (XEXP (op0, 0)) == XOR
               && CONST_INT_P (op1)
               && CONST_INT_P (XEXP (op0, 1))
               && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
        {
          enum rtx_code op = GET_CODE (op0);
          rtx a = XEXP (XEXP (op0, 0), 0);
          rtx b = XEXP (XEXP (op0, 0), 1);
          rtx c = XEXP (op0, 1);
          rtx d = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);
          HOST_WIDE_INT dval = INTVAL (d);
          HOST_WIDE_INT xcval;

          if (op == IOR)
            xcval = ~cval;
          else
            xcval = cval;

          return simplify_gen_binary (XOR, mode,
                                      simplify_gen_binary (op, mode, a, c),
                                      gen_int_mode ((bval & xcval) ^ dval,
                                                    mode));
        }
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          rtx a = XEXP (op0, 0);
          rtx b = XEXP (op0, 1);
          rtx c = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);

          /* Instead of computing ~A&C, we compute its negated value,
             ~(A|~C).  If it yields -1, ~A&C is zero, so we can
             optimize for sure.  If it does not simplify, we still try
             to compute ~A&C below, but since that always allocates
             RTL, we don't try that before committing to returning a
             simplified expression.  */
          rtx n_na_c = simplify_binary_operation (IOR, mode, a,
                                                  GEN_INT (~cval));
          if ((~cval & bval) == 0)
            {
              rtx na_c = NULL_RTX;
              if (n_na_c)
                na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
              else
                {
                  /* If ~A does not simplify, don't bother: we don't
                     want to simplify 2 operations into 3, and if na_c
                     were to simplify with na, n_na_c would have
                     simplified as well.  */
                  rtx na = simplify_unary_operation (NOT, mode, a, mode);
                  if (na)
                    na_c = simplify_gen_binary (AND, mode, na, c);
                }

              /* Try to simplify ~A&C | ~B&C.  */
              if (na_c != NULL_RTX)
                return simplify_gen_binary (IOR, mode, na_c,
                                            gen_int_mode (~bval & cval, mode));
            }
          else
            {
              /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
              if (n_na_c == CONSTM1_RTX (mode))
                {
                  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
                                                    gen_int_mode (~cval & bval,
                                                                  mode));
                  return simplify_gen_binary (IOR, mode, a_nc_b,
                                              gen_int_mode (~bval & cval,
                                                            mode));
                }
            }
        }

      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
         do (ior (and A ~C) (and B C)) which is a machine instruction on some
         machines, and also has shorter instruction path length.  */
      if (GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 0)) == XOR
          && CONST_INT_P (XEXP (op0, 1))
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
        {
          rtx a = trueop1;
          rtx b = XEXP (XEXP (op0, 0), 1);
          rtx c = XEXP (op0, 1);
          rtx nc = simplify_gen_unary (NOT, mode, c, mode);
          rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
          rtx bc = simplify_gen_binary (AND, mode, b, c);
          return simplify_gen_binary (IOR, mode, a_nc, bc);
        }
      /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C))  */
      else if (GET_CODE (op0) == AND
               && GET_CODE (XEXP (op0, 0)) == XOR
               && CONST_INT_P (XEXP (op0, 1))
               && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
        {
          rtx a = XEXP (XEXP (op0, 0), 0);
          rtx b = trueop1;
          rtx c = XEXP (op0, 1);
          rtx nc = simplify_gen_unary (NOT, mode, c, mode);
          rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
          rtx ac = simplify_gen_binary (AND, mode, a, c);
          return simplify_gen_binary (IOR, mode, ac, b_nc);
        }

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
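    /* For example, if nonzero_bits proves op0 can set only the low byte
       and op1 only bits 16..31, then (xor:SI (and:SI a (const_int 0xff))
       (ashift:SI b (const_int 16))) is rewritten above as the equivalent
       IOR, which the rotate patterns can then recognize.  */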
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & UINTVAL (trueop1)) == 0)
        {
          machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0), op1),
                                      gen_int_mode (tmp, mode));
        }

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ~UINTVAL (trueop1)
          && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
                      == UINTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }

      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X))) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 1)) == NOT
          && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 1)) == NOT
          && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
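    /* For example, (and:SI (plus:SI (ior:SI a (const_int 0x100)) b)
       (const_int 0xff)) becomes (and:SI (plus:SI a b) (const_int 0xff))
       via the M/N rule above: 0xff is a low-bit mask, 0x100 & 0xff == 0,
       and carries in a PLUS only propagate upward.  */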
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode)
          && !cfun->can_throw_non_call_exceptions)
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        {
          tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
          if (tem)
            return tem;
        }
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
              && trueop1 != CONST0_RTX (mode))
            {
              const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

              /* x/-1.0 is -x.  */
              if (real_equal (d1, &dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !real_equal (d1, &dconst0))
                {
                  REAL_VALUE_TYPE d;
                  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
                  tem = const_double_from_real_value (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            {
              tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (tem)
                return tem;
            }
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (x)
                return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    gen_int_mode (INTVAL (op1) - 1, mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
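    /* For example, (umod:SI x (const_int 8)) becomes
       (and:SI x (const_int 7)) via the power-of-two rule above.  */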
    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
                       GET_MODE_PRECISION (mode) - 1))
        return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                    mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
                                                        - INTVAL (trueop1)));
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

    canonicalize_shift:
      /* Given:
         scalar modes M1, M2
         scalar constants c1, c2
         size (M2) > size (M1)
         c1 == size (M2) - size (M1)
         optimize:
         ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
                                     <low_part>)
                          (const_int <c2>))
         to:
         (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
                    <low_part>).  */
      if ((code == ASHIFTRT || code == LSHIFTRT)
          && !VECTOR_MODE_P (mode)
          && SUBREG_P (op0)
          && CONST_INT_P (op1)
          && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
          && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
          && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
          && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
              > GET_MODE_BITSIZE (mode))
          && (INTVAL (XEXP (SUBREG_REG (op0), 1))
              == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
                  - GET_MODE_BITSIZE (mode)))
          && subreg_lowpart_p (op0))
        {
          rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
                             + INTVAL (op1));
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
          tmp = simplify_gen_binary (code,
                                     GET_MODE (SUBREG_REG (op0)),
                                     XEXP (SUBREG_REG (op0), 0),
                                     tmp);
          return lowpart_subreg (mode, tmp, inner_mode);
        }

      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT)width)
        {
          machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_PRECISION (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract scalar element from a vector using chain of
             nested VEC_SELECT expressions.  When input operand is a memory
             operand, this operation can be simplified to a simple scalar
             load from an offsetted memory address.  */
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_UNIT_SIZE (opmode);
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select element, pointed by nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_UNIT_SIZE (mode00);
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_UNIT_SIZE (mode01);
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select correct operand of VEC_CONCAT
                     and adjust selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }
          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
        }
      else
        {
3590 == GET_MODE_INNER (GET_MODE (trueop0
)));
3591 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3593 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3595 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
3596 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3597 rtvec v
= rtvec_alloc (n_elts
);
3600 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3601 for (i
= 0; i
< n_elts
; i
++)
3603 rtx x
= XVECEXP (trueop1
, 0, i
);
3605 gcc_assert (CONST_INT_P (x
));
3606 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3610 return gen_rtx_CONST_VECTOR (mode
, v
);
3613 /* Recognize the identity. */
3614 if (GET_MODE (trueop0
) == mode
)
3616 bool maybe_ident
= true;
3617 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3619 rtx j
= XVECEXP (trueop1
, 0, i
);
3620 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3622 maybe_ident
= false;
3630 /* If we build {a,b} then permute it, build the result directly. */
3631 if (XVECLEN (trueop1
, 0) == 2
3632 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3633 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3634 && GET_CODE (trueop0
) == VEC_CONCAT
3635 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3636 && GET_MODE (XEXP (trueop0
, 0)) == mode
3637 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3638 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3640 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3641 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3644 gcc_assert (i0
< 4 && i1
< 4);
3645 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3646 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3648 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3651 if (XVECLEN (trueop1
, 0) == 2
3652 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3653 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3654 && GET_CODE (trueop0
) == VEC_CONCAT
3655 && GET_MODE (trueop0
) == mode
)
3657 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3658 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3661 gcc_assert (i0
< 2 && i1
< 2);
3662 subop0
= XEXP (trueop0
, i0
);
3663 subop1
= XEXP (trueop0
, i1
);
3665 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3668 /* If we select one half of a vec_concat, return that. */
3669 if (GET_CODE (trueop0
) == VEC_CONCAT
3670 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3672 rtx subop0
= XEXP (trueop0
, 0);
3673 rtx subop1
= XEXP (trueop0
, 1);
3674 machine_mode mode0
= GET_MODE (subop0
);
3675 machine_mode mode1
= GET_MODE (subop1
);
3676 int li
= GET_MODE_UNIT_SIZE (mode0
);
3677 int l0
= GET_MODE_SIZE (mode0
) / li
;
3678 int l1
= GET_MODE_SIZE (mode1
) / li
;
3679 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3680 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3682 bool success
= true;
3683 for (int i
= 1; i
< l0
; ++i
)
3685 rtx j
= XVECEXP (trueop1
, 0, i
);
3686 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3695 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3697 bool success
= true;
3698 for (int i
= 1; i
< l1
; ++i
)
3700 rtx j
= XVECEXP (trueop1
, 0, i
);
3701 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
      if (XVECLEN (trueop1, 0) == 1
          && CONST_INT_P (XVECEXP (trueop1, 0, 0))
          && GET_CODE (trueop0) == VEC_CONCAT)
        {
          rtx vec = trueop0;
          int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)
            {
              HOST_WIDE_INT vec_size;

              if (CONST_INT_P (XEXP (vec, 0)))
                {
                  /* vec_concat of two const_ints doesn't make sense with
                     respect to modes.  */
                  if (CONST_INT_P (XEXP (vec, 1)))
                    return 0;

                  vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
                             - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
                }
              else
                vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

              if (offset < vec_size)
                vec = XEXP (vec, 0);
              else
                {
                  offset -= vec_size;
                  vec = XEXP (vec, 1);
                }
              vec = avoid_constant_pool_reference (vec);
            }

          if (GET_MODE (vec) == mode)
            return vec;
        }

      /* If we select elements in a vec_merge that all come from the same
         operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
        {
          rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
          if (CONST_INT_P (trueop02))
            {
              unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
              bool all_operand0 = true;
              bool all_operand1 = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
                    all_operand1 = false;
                  else
                    all_operand0 = false;
                }
              if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
                return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
              if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
                return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
            }
        }

      /* If we have two nested selects that are inverses of each
         other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
          && GET_MODE (XEXP (trueop0, 0)) == mode)
        {
          rtx op0_subop1 = XEXP (trueop0, 1);
          gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

          /* Apply the outer ordering vector to the inner one.  (The inner
             ordering vector is expressly permitted to be of a different
             length than the outer one.)  If the result is { 0, 1, ..., n-1 }
             then the two VEC_SELECTs cancel.  */
          for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
            {
              rtx x = XVECEXP (trueop1, 0, i);
              if (!CONST_INT_P (x))
                return 0;
              rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
              if (!CONST_INT_P (y) || i != INTVAL (y))
                return 0;
            }
          return XEXP (trueop0, 0);
        }

      return 0;
    case VEC_CONCAT:
      {
3806 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3807 ? GET_MODE (trueop0
)
3808 : GET_MODE_INNER (mode
));
3809 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3810 ? GET_MODE (trueop1
)
3811 : GET_MODE_INNER (mode
));
3813 gcc_assert (VECTOR_MODE_P (mode
));
3814 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3815 == GET_MODE_SIZE (mode
));
3817 if (VECTOR_MODE_P (op0_mode
))
3818 gcc_assert (GET_MODE_INNER (mode
)
3819 == GET_MODE_INNER (op0_mode
));
3821 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3823 if (VECTOR_MODE_P (op1_mode
))
3824 gcc_assert (GET_MODE_INNER (mode
)
3825 == GET_MODE_INNER (op1_mode
));
3827 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3829 if ((GET_CODE (trueop0
) == CONST_VECTOR
3830 || CONST_SCALAR_INT_P (trueop0
)
3831 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3832 && (GET_CODE (trueop1
) == CONST_VECTOR
3833 || CONST_SCALAR_INT_P (trueop1
)
3834 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3836 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
3837 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3838 rtvec v
= rtvec_alloc (n_elts
);
3840 unsigned in_n_elts
= 1;
3842 if (VECTOR_MODE_P (op0_mode
))
3843 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3844 for (i
= 0; i
< n_elts
; i
++)
3848 if (!VECTOR_MODE_P (op0_mode
))
3849 RTVEC_ELT (v
, i
) = trueop0
;
3851 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3855 if (!VECTOR_MODE_P (op1_mode
))
3856 RTVEC_ELT (v
, i
) = trueop1
;
3858 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3863 return gen_rtx_CONST_VECTOR (mode
, v
);
	/* Try to merge two VEC_SELECTs from the same vector into a single one.
	   Restrict the transformation to avoid generating a VEC_SELECT with a
	   mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return const_double_from_real_value (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  const REAL_VALUE_TYPE *opr0, *opr1;
	  bool inexact;

	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
		  || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
	    return 0;

	  real_convert (&f0, mode, opr0);
	  real_convert (&f1, mode, opr1);

	  if (code == DIV
	      && real_equal (&f1, &dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && real_equal (&f0, &dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */
	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */
	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return const_double_from_real_value (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = rtx_mode_t (op0, mode);
      rtx_mode_t pop1 = rtx_mode_t (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert happens,
	 you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, width);
	    else if (wi::geu_p (wop1, width))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than testing for specific cases, we do this by a brute-force
   method and do all possible simplifications until no more changes occur.
   Then we rebuild the operation.

   May return NULL_RTX when no changes were made.  */

static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      /* If this operand was negated then we will potentially
		 canonicalize the expression.  Similarly if we don't
		 place the operands adjacent we're re-ordering the
		 expression and thus might be performing a
		 canonicalization.  Ignore register re-ordering.
		 ??? It might be better to shuffle the ops array here,
		 but then (plus (plus (A, B), plus (C, D))) wouldn't
		 be seen as non-canonical.  */
	      if (this_neg
		  || (i != n_ops - 2
		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
		canonicalized = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);
  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int cmp;

	  j = i - 1;
	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
	  if (cmp <= 0)
	    continue;
	  /* Just swapping registers doesn't count as canonicalization.  */
	  if (cmp != 1)
	    canonicalized = 1;

	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (--j >= 0
		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
	  ops[j + 1] = save;
	}
      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  /* If nothing changed, check that rematerialization of rtl instructions
     is still required.  */
  if (!canonicalized)
    {
      /* Perform rematerialization only if all operands are registers and
	 all operations are PLUS.  */
      /* ??? Also disallow (non-global, non-frame) fixed registers to work
	 around rs6000 and how it uses the CA register.  See PR67145.  */
      for (i = 0; i < n_ops; i++)
	if (ops[i].neg
	    || !REG_P (ops[i].op)
	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
		&& fixed_regs[REGNO (ops[i].op)]
		&& !global_regs[REGNO (ops[i].op)]
		&& ops[i].op != frame_pointer_rtx
		&& ops[i].op != arg_pointer_rtx
		&& ops[i].op != stack_pointer_rtx))
	  return NULL_RTX;
      goto gen_result;
    }
  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      if (CONST_INT_P (value))
	{
	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					     INTVAL (value));
	  n_ops--;
	}
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
 gen_result:
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return const_double_from_real_value (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
     transformed into (LTU a -C).  */
  if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
      && CONST_INT_P (XEXP (op0, 1))
      && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational (LTU, mode, cmp_mode,
				      XEXP (op0, 0), new_cmp);
    }
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;
  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
      rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & (HOST_WIDE_INT_1U << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
	    return const0_rtx;
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
	    return const_true_rtx;
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X),
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively.  Return OP (X) if the expression
   can be simplified to that, or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */

static rtx
simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
{
  if (cmp_code != EQ && cmp_code != NE)
    return NULL_RTX;

  /* Result on X == 0 and X != 0 respectively.  */
  rtx on_zero, on_nonzero;
  if (cmp_code == EQ)
    {
      on_zero = true_val;
      on_nonzero = false_val;
    }
  else
    {
      on_zero = false_val;
      on_nonzero = true_val;
    }

  rtx_code op_code = GET_CODE (on_nonzero);
  if ((op_code != CLZ && op_code != CTZ)
      || !rtx_equal_p (XEXP (on_nonzero, 0), x)
      || !CONST_INT_P (on_zero))
    return NULL_RTX;

  HOST_WIDE_INT op_val;
  if (((op_code == CLZ
	&& CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
       || (op_code == CTZ
	   && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
      && op_val == INTVAL (on_zero))
    return on_nonzero;

  return NULL_RTX;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & (HOST_WIDE_INT_1U << (op1val - 1))) != 0)
		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
      /* Convert (!c) != {0,...,0} ? a : b into
	 c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits = CONST_VECTOR_NUNITS (cv);
	  bool ok = true;
	  for (int i = 0; i < nunits; ++i)
	    if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
	      {
		ok = false;
		break;
	      }
	  if (ok)
	    {
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}
      /* Convert x == 0 ? N : clz (x) into clz (x) when
	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
	 Similarly for ctz (x).  */
      if (COMPARISON_P (op0) && !side_effects_p (op0)
	  && XEXP (op0, 1) == const0_rtx)
	{
	  rtx simplified
	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
				     op1, op2);
	  if (simplified)
	    return simplified;
	}
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  /* Likewise, drop the unused arm of a nested VEC_MERGE in the
	     second operand.  */
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }
	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i)))
	     a (1 << i)) with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s = NULL;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = rtx_mode_t (el, innermode);
	    unsigned char extend = wi::sign_mask (val);
	    int prec = wi::get_precision (val);

	    for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;
<= HOST_BITS_PER_WIDE_INT
)
5847 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5848 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5852 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5853 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5854 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
5856 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
5857 >> (i
- HOST_BITS_PER_WIDE_INT
);
5858 for (; i
< elem_bitsize
; i
+= value_bit
)
5868 /* Now, pick the right byte to start with. */
5869 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5870 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5871 will already have offset 0. */
5872 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
5874 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
5876 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5877 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5878 byte
= (subword_byte
% UNITS_PER_WORD
5879 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5882 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5883 so if it's become negative it will instead be very large.) */
5884 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5886 /* Convert from bytes to chunks of size value_bit. */
5887 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
5889 /* Re-pack the value. */
5890 num_elem
= GET_MODE_NUNITS (outermode
);
5892 if (VECTOR_MODE_P (outermode
))
5894 result_v
= rtvec_alloc (num_elem
);
5895 elems
= &RTVEC_ELT (result_v
, 0);
5900 outer_submode
= GET_MODE_INNER (outermode
);
5901 outer_class
= GET_MODE_CLASS (outer_submode
);
5902 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
5904 gcc_assert (elem_bitsize
% value_bit
== 0);
5905 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
)
5927 case MODE_PARTIAL_INT
:
5932 = (GET_MODE_BITSIZE (outer_submode
) + HOST_BITS_PER_WIDE_INT
- 1)
5933 / HOST_BITS_PER_WIDE_INT
;
5934 HOST_WIDE_INT tmp
[MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_WIDE_INT
];
5937 if (GET_MODE_PRECISION (outer_submode
) > MAX_BITSIZE_MODE_ANY_INT
)
5939 for (u
= 0; u
< units
; u
++)
5941 unsigned HOST_WIDE_INT buf
= 0;
5943 i
< HOST_BITS_PER_WIDE_INT
&& base
+ i
< elem_bitsize
;
5945 buf
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5948 base
+= HOST_BITS_PER_WIDE_INT
;
5950 r
= wide_int::from_array (tmp
, units
,
5951 GET_MODE_PRECISION (outer_submode
));
5952 #if TARGET_SUPPORTS_WIDE_INT == 0
5953 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5954 if (wi::min_precision (r
, SIGNED
) > HOST_BITS_PER_DOUBLE_INT
)
5957 elems
[elem
] = immed_wide_int_const (r
, outer_submode
);
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;
	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT) (*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of op.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis, which cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      part_size = GET_MODE_SIZE (part_mode);
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
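
  /* For example, with 4-byte SFmode parts, (subreg:SF (concat:SC RE IM) 4)
     selects the second part and simplifies to IM; RE and IM stand for
     arbitrary SFmode operands.  */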

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
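
  /* For example, on a little-endian target
     (subreg:SI (zero_extend:DI (reg:SI X)) 4) starts at bit 32, beyond the
     32 bits of the SImode source, so it folds to (const_int 0).  */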

  /* A lowpart SUBREG to a narrower scalar integer mode acts as a
     truncation; try the truncation simplifications.  */
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
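
/* For example, simplify_gen_subreg (QImode, gen_int_mode (0x1234, HImode),
   HImode, 0) folds to a CONST_INT: 0x34 on a little-endian target, 0x12 on
   a big-endian one.  The operand values are illustrative only.  */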

/* Generate a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) in OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
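
/* For example, lowpart_subreg (SImode, X, DImode) for a DImode pseudo X
   yields (subreg:SI X 0) on a little-endian target and (subreg:SI X 4) on a
   64-bit big-endian one, since subreg_lowpart_offset computes the byte
   offset of the least significant part.  */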

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow pass-dependent state to be provided to these routines and
	   add simplifications based on that state.  Remove code from cse.c
	   & combine.c that becomes redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
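
/* For example, passing (plus:SI (const_int 1) (reg:SI X)) to simplify_rtx
   returns the canonical (plus:SI (reg:SI X) (const_int 1)), via the
   commutative-operand swap in the RTX_COMM_ARITH case below.  */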

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))