/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
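/* Illustrative note (not in the original source): with a 64-bit
   HOST_WIDE_INT, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -1) yields
   HOST_WIDE_INT_M1 (all bits set in the high half), while
   HWI_SIGN_EXTEND (1) yields HOST_WIDE_INT_0, mirroring ordinary
   two's-complement sign extension of LOW into HIGH.  */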
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
					   mode);
  return gen_int_mode (val, mode);
}
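/* Illustrative note (not in the original source): the VAL == UINTVAL (I)
   test catches the inputs whose negation is not representable within a
   single HOST_WIDE_INT chunk of a wider mode (zero and the minimum
   HOST_WIDE_INT); those are routed through the wide-int path in
   simplify_const_unary_operation instead.  */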
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
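/* Illustrative note (not in the original source): for SImode the sign-bit
   constant is (const_int -2147483648), i.e. bit 31 set; mode_signbit_p
   masks VAL down to the 32-bit precision before comparing it with
   HOST_WIDE_INT_1U << 31, so the host's sign extension of the CONST_INT
   representation does not matter.  */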
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
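/* Illustrative sketch (not in the original source): because of the
   canonicalization above, a call such as

     rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (4), reg);

   returns (plus:SI reg (const_int 4)) when nothing folds outright;
   `reg' stands for a hypothetical (reg:SI 100).  */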
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
	return c;
      else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
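/* Illustrative note (not in the original source): given a DFmode constant
   pool entry addressed by a hypothetical (symbol_ref "*.LC0"), a read such
   as (mem:DF (symbol_ref "*.LC0")) is replaced by the pooled CONST_DOUBLE
   itself; a narrower or offset read is retried via simplify_subreg before
   falling back to the original MEM.  */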
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && VAR_P (decl)
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
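/* Illustrative sketch (not in the original source): a caller that knows a
   hypothetical pseudo `reg100' currently holds (const_int 8) can fold a
   use in place:

     rtx src = gen_rtx_PLUS (SImode, reg100, GEN_INT (4));
     rtx folded = simplify_replace_rtx (src, reg100, GEN_INT (8));

   after which `folded' is (const_int 12).  */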
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
	  /* If doing this transform works for an X with all bits set,
	     it works for any X.  */
	  && ((GET_MODE_MASK (mode) >> shift) & mask)
	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
	{
	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
	  return simplify_gen_binary (AND, mode, op0, mask_op);
	}
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    {
	      pos -= op_precision - precision;
	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					   XEXP (op, 1), GEN_INT (pos));
	    }
	}
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					 XEXP (op, 1), XEXP (op, 2));
	}
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
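/* Illustrative note (not in the original source): the distribution rule
   above rewrites, e.g.,

     (truncate:QI (plus:SI (reg:SI 100) (reg:SI 101)))

   as

     (plus:QI (truncate:QI (reg:SI 100)) (truncate:QI (reg:SI 101)))

   so the narrower operands become visible to further folding.  */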
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
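/* Illustrative note (not in the original source): a 32-bit unsigned value
   whose nonzero bits fit in 20 bits converts exactly to SFmode (24
   significand bits), so the FLOAT_TRUNCATE and FLOAT_EXTEND cases below
   may fold (float_truncate:SF (float:DF x)) to (float:SF x) for such an x
   even without -funsafe-math-optimizations.  */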
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
	 and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    std::swap (in1, in2);

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;
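      /* Illustrative note (not in the original source): the De Morgan
	 rewrite above turns (not (ior A B)) into (and (not A) (not B)) and
	 (not (and A B)) into (ior (not A) (not B)), so targets with and-not
	 or nand style instructions can match the result directly.  */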
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
	 If comparison is not reversible use
	 x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
	{
	  rtx cond = XEXP (op, 0);
	  rtx true_rtx = XEXP (op, 1);
	  rtx false_rtx = XEXP (op, 2);

	  if ((GET_CODE (true_rtx) == NEG
	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	       || (GET_CODE (false_rtx) == NEG
		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
	    {
	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
		temp = reversed_comparison (cond, mode);
	      else
		{
		  temp = cond;
		  std::swap (true_rtx, false_rtx);
		}
	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
					   mode, temp, true_rtx, false_rtx);
	    }
	}

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
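      /* Illustrative note (not in the original source): for integer modes
	 HONOR_SIGNED_ZEROS and HONOR_SIGN_DEPENDENT_ROUNDING are false, so
	 (neg:SI (minus:SI a b)) always becomes (minus:SI b a); for IEEE
	 float modes the fold is suppressed because the two forms can
	 produce zeros of opposite sign.  */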
    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;
:
1240 if (DECIMAL_FLOAT_MODE_P (mode
))
1243 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1244 if (GET_CODE (op
) == FLOAT_EXTEND
1245 && GET_MODE (XEXP (op
, 0)) == mode
)
1246 return XEXP (op
, 0);
1248 /* (float_truncate:SF (float_truncate:DF foo:XF))
1249 = (float_truncate:SF foo:XF).
1250 This may eliminate double rounding, so it is unsafe.
1252 (float_truncate:SF (float_extend:XF foo:DF))
1253 = (float_truncate:SF foo:DF).
1255 (float_truncate:DF (float_extend:XF foo:SF))
1256 = (float_extend:DF foo:SF). */
1257 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1258 && flag_unsafe_math_optimizations
)
1259 || GET_CODE (op
) == FLOAT_EXTEND
)
1260 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
1262 > GET_MODE_SIZE (mode
)
1263 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1265 XEXP (op
, 0), mode
);
1267 /* (float_truncate (float x)) is (float x) */
1268 if ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1269 && (flag_unsafe_math_optimizations
1270 || exact_int_to_float_conversion_p (op
)))
1271 return simplify_gen_unary (GET_CODE (op
), mode
,
1273 GET_MODE (XEXP (op
, 0)));
1275 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1276 (OP:SF foo:SF) if OP is NEG or ABS. */
1277 if ((GET_CODE (op
) == ABS
1278 || GET_CODE (op
) == NEG
)
1279 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1280 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1281 return simplify_gen_unary (GET_CODE (op
), mode
,
1282 XEXP (XEXP (op
, 0), 0), mode
);
1284 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1285 is (float_truncate:SF x). */
1286 if (GET_CODE (op
) == SUBREG
1287 && subreg_lowpart_p (op
)
1288 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1289 return SUBREG_REG (op
);
1293 if (DECIMAL_FLOAT_MODE_P (mode
))
1296 /* (float_extend (float_extend x)) is (float_extend x)
1298 (float_extend (float x)) is (float x) assuming that double
1299 rounding can't happen.
1301 if (GET_CODE (op
) == FLOAT_EXTEND
1302 || ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1303 && exact_int_to_float_conversion_p (op
)))
1304 return simplify_gen_unary (GET_CODE (op
), mode
,
1306 GET_MODE (XEXP (op
, 0)));
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_PRECISION (mode)
		      > GET_MODE_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
	 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (op, 1) != const0_rtx)
	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
	  && GET_MODE_PRECISION (GET_MODE (op))
	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	     <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (mode)
	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (mode)
	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
				     GET_MODE (SUBREG_REG (op)));
	}

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
				rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_UNIT_SIZE (mode);
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert happens, you
	 just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
	    int_value = GET_MODE_PRECISION (mode);
	  result = wi::shwi (int_value, mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
	    int_value = GET_MODE_PRECISION (mode);
	  result = wi::shwi (int_value, mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	case SQRT:
	default:
	  return 0;
	}

      return immed_wide_int_const (result, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4], i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }

  return 0;
}
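/* A worked example of the FIX clamping above (example ours): folding
   (fix:SI (const_double:DF 3.0e10)) with width 32 finds 3.0e10 greater
   than the signed maximum 2147483647, so the routine returns
   (const_int 2147483647) instead of relying on target-specific
   overflow behavior.  */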
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
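/* An illustrative application of the bswap commutation above (example
   ours): in SImode,
       (and (bswap:SI x) (const_int 0xff))
   becomes
       (bswap:SI (and:SI x (const_int 0xff000000)))
   because byte-swapping the constant 0xff gives 0xff000000; the inner
   AND can then combine further with X.  */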
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
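/* A concrete reassociation performed by the routine above (example
   ours): for (plus (plus x (const_int 1)) (const_int 2)), the two
   constants fold via simplify_binary_operation, and the result is
   (plus x (const_int 3)).  */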
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
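/* A minimal usage sketch (illustrative only; the register number is
   made up):

       rtx x = gen_rtx_REG (SImode, 100);
       rtx y = simplify_binary_operation (PLUS, SImode, x, const0_rtx);

   Here Y comes back as X itself, since x + 0 folds for integral modes;
   callers must be prepared for a NULL_RTX result when nothing
   simplifies and emit the operation as written.  */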
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return (set_src_cost (tem, mode, speed)
		      <= set_src_cost (orig, mode, speed) ? tem : 0);
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
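      /* An example of the X * C + X distribution above (example ours):
	 (plus (mult x (const_int 3)) x) gives coeff0 == 3 and
	 coeff1 == 1, so it becomes (mult x (const_int 4)) provided the
	 new form is no more expensive than the original.  */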
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
	    return xop00;

	  if (REG_P (xop00) && REG_P (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE (xop00) == mode
	      && GET_MODE (xop10) == mode
	      && GET_MODE_CLASS (mode) == MODE_CC)
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
	 constants, in which case not retaining additions and
	 subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
	  && !contains_symbolic_reference_p (op1))
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return (set_src_cost (tem, mode, speed)
		      <= set_src_cost (orig, mode, speed) ? tem : 0);
	    }
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
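      /* An example of the relocatable-coefficient rule above (example
	 ours): (minus x (const_int 4)) is rewritten as
	 (plus x (const_int -4)), keeping constants in addend form.  */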
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
	}

      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	  if (real_equal (d1, &dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && real_equal (d1, &dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
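      /* An example of the power-of-two rule above (example ours):
	 (mult x (const_int 8)) becomes (ashift x (const_int 3)),
	 since wi::exact_log2 (8) == 3.  */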
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	{
	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (UINTVAL (XEXP (op0, 1))
						       & ~UINTVAL (op1),
						       mode));
	  return simplify_gen_binary (IOR, mode, tmp, op1);
	}

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
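      /* An example of the rotate recognition above (example ours): in
	 SImode, (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
	 has shift counts summing to the 32-bit precision, so it becomes
	 (rotate x (const_int 24)).  */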
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
	 out bits inverted twice and not set by C.  Similarly, given
	 (xor (and (xor A B) C) D), simplify without inverting C in
	 the xor operand: (xor (and A C) (B&C)^D).
	 */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (op1)
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
	{
	  enum rtx_code op = GET_CODE (op0);
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx d = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);
	  HOST_WIDE_INT dval = INTVAL (d);
	  HOST_WIDE_INT xcval;

	  if (op == IOR)
	    xcval = ~cval;
	  else
	    xcval = cval;

	  return simplify_gen_binary (XOR, mode,
				      simplify_gen_binary (op, mode, a, c),
				      gen_int_mode ((bval & xcval) ^ dval,
						    mode));
	}

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  /* Instead of computing ~A&C, we compute its negated value,
	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
						  GEN_INT (~cval));

	  if ((~cval & bval) == 0)
	    {
	      rtx na_c = NULL_RTX;
	      if (n_na_c)
		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
	      else
		{
		  /* If ~A does not simplify, don't bother: we don't
		     want to simplify 2 operations into 3, and if na_c
		     were to simplify with na, n_na_c would have
		     simplified as well.  */
		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
		  if (na)
		    na_c = simplify_gen_binary (AND, mode, na, c);
		}

	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval, mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (n_na_c == CONSTM1_RTX (mode))
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}

      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
	 do (ior (and A ~C) (and B C)) which is a machine instruction on some
	 machines, and also has shorter instruction path length.  */
      if (GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && CONST_INT_P (XEXP (op0, 1))
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
	{
	  rtx a = trueop1;
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
	  rtx bc = simplify_gen_binary (AND, mode, b, c);
	  return simplify_gen_binary (IOR, mode, a_nc, bc);
	}
      /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C))  */
      else if (GET_CODE (op0) == AND
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (XEXP (op0, 1))
	       && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
	{
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = trueop1;
	  rtx c = XEXP (op0, 1);
	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
	  rtx ac = simplify_gen_binary (AND, mode, a, c);
	  return simplify_gen_binary (IOR, mode, ac, b_nc);
	}

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
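      /* An example of the disjoint-bits conversion above (example ours):
	 (xor (ashift x (const_int 8)) (const_int 255)) has no nonzero
	 bits in common between its operands, so it is rewritten as
	 (ior (ashift x (const_int 8)) (const_int 255)), which can then
	 feed the rotate detection under IOR.  */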
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}

      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X))) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 1)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 1)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
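      /* An example of the absorption rule above (example ours):
	 (and (ior x y) x) matches "(A | B) & A" and simplifies directly
	 to x, provided neither arm of the IOR has side effects.  */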
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode)
	  && !cfun->can_throw_non_call_exceptions)
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	      /* x/-1.0 is -x.  */
	      if (real_equal (d1, &dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !real_equal (d1, &dconst0))
		{
		  REAL_VALUE_TYPE d;

		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
		  tem = const_double_from_real_value (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
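      /* An example of the reciprocal transformation above (example
	 ours): with -freciprocal-math, (div x (const_double 2.0))
	 becomes (mult x (const_double 0.5)), since 1/2.0 is exactly
	 representable.  */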
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (tem)
		return tem;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (x)
		return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
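      /* An example of the modulus rule above (example ours):
	 (umod x (const_int 8)) becomes (and x (const_int 7)), because
	 8 is a power of two and its predecessor is the low-bit mask.  */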
    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_PRECISION (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				    mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
							- INTVAL (trueop1)));
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

      /* Given:
	 scalar modes M1, M2
	 scalar constants c1, c2
	 size (M2) > size (M1)
	 c1 == size (M2) - size (M1)
	 optimize:
	 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
				 <low_part>)
		      (const_int <c2>))
	 to:
	 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
		    <low_part>).  */
      if ((code == ASHIFTRT || code == LSHIFTRT)
	  && !VECTOR_MODE_P (mode)
	  && SUBREG_P (op0)
	  && CONST_INT_P (op1)
	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
	  && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
	      > GET_MODE_BITSIZE (mode))
	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
	      == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
		  - GET_MODE_BITSIZE (mode)))
	  && subreg_lowpart_p (op0))
	{
	  rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
			     + INTVAL (op1));
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	  tmp = simplify_gen_binary (code,
				     GET_MODE (SUBREG_REG (op0)),
				     XEXP (SUBREG_REG (op0), 0),
				     tmp);
	  return lowpart_subreg (mode, tmp, inner_mode);
	}

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;
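      /* An example of the rotate canonicalization above (example ours):
	 in SImode, (rotate x (const_int 24)) lies in the upper half of
	 the range, so it is rewritten as (rotatert x (const_int 8)).  */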
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));

	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract scalar element from a vector using chain of
	     nested VEC_SELECT expressions.  When input operand is a memory
	     operand, this operation can be simplified to a simple scalar
	     load from an offsetted memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_UNIT_SIZE (opmode);
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_UNIT_SIZE (mode00);
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_UNIT_SIZE (mode01);
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select correct operand of VEC_CONCAT
		     and adjust selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }
	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_UNIT_SIZE (mode);
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Recognize the identity.  */
	  if (GET_MODE (trueop0) == mode)
	    {
	      bool maybe_ident = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (j) || INTVAL (j) != i)
		    {
		      maybe_ident = false;
		      break;
		    }
		}
	      if (maybe_ident)
		return trueop0;
	    }

	  /* If we build {a,b} then permute it, build the result directly.  */
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 0)) == mode
	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 1)) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 4 && i1 < 4);
	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_MODE (trueop0) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 2 && i1 < 2);
	      subop0 = XEXP (trueop0, i0);
	      subop1 = XEXP (trueop0, i1);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  /* If we select one half of a vec_concat, return that.  */
	  if (GET_CODE (trueop0) == VEC_CONCAT
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
	    {
	      rtx subop0 = XEXP (trueop0, 0);
	      rtx subop1 = XEXP (trueop0, 1);
	      machine_mode mode0 = GET_MODE (subop0);
	      machine_mode mode1 = GET_MODE (subop1);
	      int li = GET_MODE_UNIT_SIZE (mode0);
	      int l0 = GET_MODE_SIZE (mode0) / li;
	      int l1 = GET_MODE_SIZE (mode1) / li;
	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
		{
		  bool success = true;
		  for (int i = 1; i < l0; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop0;
		}
	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
		{
		  bool success = true;
		  for (int i = 1; i < l1; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop1;
		}
	    }
	}

      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size;

	      if (CONST_INT_P (XEXP (vec, 0)))
		{
		  /* vec_concat of two const_ints doesn't make sense with
		     respect to modes.  */
		  if (CONST_INT_P (XEXP (vec, 1)))
		    break;

		  vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
			     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
		}
	      else
		vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}

      /* If we select elements in a vec_merge that all come from the same
	 operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
	{
	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
	  if (CONST_INT_P (trueop02))
	    {
	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
	      bool all_operand0 = true;
	      bool all_operand1 = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
		    all_operand1 = false;
		  else
		    all_operand0 = false;
		}
	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
	    }
	}

      /* If we have two nested selects that are inverses of each
	 other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
	  && GET_MODE (XEXP (trueop0, 0)) == mode)
	{
	  rtx op0_subop1 = XEXP (trueop0, 1);
	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

	  /* Apply the outer ordering vector to the inner one.  (The inner
	     ordering vector is expressly permitted to be of a different
	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
	     then the two VEC_SELECTs cancel.  */
	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
	    {
	      rtx x = XVECEXP (trueop1, 0, i);
	      if (!CONST_INT_P (x))
		return 0;
	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
	      if (!CONST_INT_P (y) || i != INTVAL (y))
		return 0;
	    }
	  return XEXP (trueop0, 0);
	}

      return 0;
    case VEC_CONCAT:
      {
	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				 ? GET_MODE (trueop0)
				 : GET_MODE_INNER (mode));
	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				 ? GET_MODE (trueop1)
				 : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_SCALAR_INT_P (trueop0)
	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_SCALAR_INT_P (trueop1)
		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
	  {
	    int elt_size = GET_MODE_UNIT_SIZE (mode);
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }

	/* Try to merge two VEC_SELECTs from the same vector into a single one.
	   Restrict the transformation to avoid generating a VEC_SELECT with a
	   mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }
      }
      return 0;

    default:
      break;
    }

  return 0;
}
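/* An example of the VEC_SELECT merging above (example ours): with v a
   V4SI vector,
       (vec_concat (vec_select v (parallel [(const_int 0) (const_int 1)]))
		   (vec_select v (parallel [(const_int 3) (const_int 2)])))
   concatenates two selections from the same source, so it becomes a
   single (vec_select v (parallel [(const_int 0) (const_int 1)
				   (const_int 3) (const_int 2)])).  */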
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}

rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return const_double_from_real_value (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  const REAL_VALUE_TYPE *opr0, *opr1;
	  bool inexact;

	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
		  || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
	    return 0;

	  real_convert (&f0, mode, opr0);
	  real_convert (&f1, mode, opr1);

	  if (code == DIV
	      && real_equal (&f1, &dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && real_equal (&f0, &dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return const_double_from_real_value (result, mode);
	}
    }

  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = rtx_mode_t (op0, mode);
      rtx_mode_t pop1 = rtx_mode_t (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, width);
	    else if (wi::geu_p (wop1, width))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
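      /* E.g. (plus:SI (const_int 7) (const_int -3)) reaches this point
	 and folds to (const_int 4) via wi::add above.  */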
    }

  return NULL_RTX;
}

/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return 0;
}
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */

static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      /* If this operand was negated then we will potentially
		 canonicalize the expression.  Similarly if we don't
		 place the operands adjacent we're re-ordering the
		 expression and thus might be performing a
		 canonicalization.  Ignore register re-ordering.
		 ??? It might be better to shuffle the ops array here,
		 but then (plus (plus (A, B), plus (C, D))) wouldn't
		 be seen as non-canonical.  */
	      if (this_neg
		  || (i != n_ops - 2
		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
		canonicalized = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int cmp;

	  j = i - 1;
	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
	  if (cmp <= 0)
	    continue;
	  /* Just swapping registers doesn't count as canonicalization.  */
	  if (cmp != 1)
	    canonicalized = 1;

	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (--j >= 0
		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }

  /* If nothing changed, check that rematerialization of rtl instructions
     is still required.  */
  if (!canonicalized)
    {
      /* Perform rematerialization if only all operands are registers and
	 all operations are PLUS.  */
      /* ??? Also disallow (non-global, non-frame) fixed registers to work
	 around rs6000 and how it uses the CA register.  See PR67145.  */
      for (i = 0; i < n_ops; i++)
	if (ops[i].neg
	    || !REG_P (ops[i].op)
	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
		&& fixed_regs[REGNO (ops[i].op)]
		&& !global_regs[REGNO (ops[i].op)]
		&& ops[i].op != frame_pointer_rtx
		&& ops[i].op != arg_pointer_rtx
		&& ops[i].op != stack_pointer_rtx))
	  return NULL_RTX;
      goto gen_result;
    }

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      if (CONST_INT_P (value))
	{
	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					     INTVAL (value));
	  n_ops--;
	}
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
 gen_result:
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);
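  /* E.g. (minus a (minus b c)) is decomposed here into the ops array
     { +a, -b, +c } and rebuilt as ((a - b) + c), with any constant
     terms combined along the way.  */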
  return result;
}

/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return const_double_from_real_value (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0)
      || CC0_P (op1))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
     transformed into (LTU a -C).  */
  if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
      && CONST_INT_P (XEXP (op0, 1))
      && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational (LTU, mode, cmp_mode,
				      XEXP (op0, 0), new_cmp);
    }
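  /* E.g. with C = 4 this turns (gtu (plus a 4) 3) into (ltu a -4):
     a + 4 is unsigned-greater than 3 exactly when a did not wrap into
     the last four values of the mode.  */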
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
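/* E.g. comparison_result (LE, CMP_GT) is const0_rtx: when the operands
   are known to compare greater, x <= y must be false.  */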
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
      rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }

  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }

  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & (HOST_WIDE_INT_1U << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GE:
		case GT:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
	    return const0_rtx;
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
	    return const_true_rtx;
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
   can be simplified to that or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */

static rtx
simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
{
  if (cmp_code != EQ && cmp_code != NE)
    return NULL_RTX;

  /* Result on X == 0 and X != 0 respectively.  */
  rtx on_zero, on_nonzero;
  if (cmp_code == EQ)
    {
      on_zero = true_val;
      on_nonzero = false_val;
    }
  else
    {
      on_zero = false_val;
      on_nonzero = true_val;
    }

  rtx_code op_code = GET_CODE (on_nonzero);
  if ((op_code != CLZ && op_code != CTZ)
      || !rtx_equal_p (XEXP (on_nonzero, 0), x)
      || !CONST_INT_P (on_zero))
    return NULL_RTX;

  HOST_WIDE_INT op_val;
  if (((op_code == CLZ
	&& CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
       || (op_code == CTZ
	   && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
      && op_val == INTVAL (on_zero))
    return on_nonzero;

  return NULL_RTX;
}
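/* E.g. on a target whose CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode,
   (x == 0 ? 32 : clz (x)) collapses to plain clz (x).  */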
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
		     != 0)
		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      /* Convert (!c) != {0,...,0} ? a : b into
	 c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits = CONST_VECTOR_NUNITS (cv);
	  bool ok = true;
	  for (int i = 0; i < nunits; ++i)
	    if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
	      {
		ok = false;
		break;
	      }
	  if (ok)
	    {
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}

      /* Convert x == 0 ? N : clz (x) into clz (x) when
	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
	 Similarly for ctz (x).  */
      if (COMPARISON_P (op0) && !side_effects_p (op0)
	  && XEXP (op0, 1) == const0_rtx)
	{
	  rtx simplified
	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
				     op1, op2);
	  if (simplified)
	    return simplified;
	}

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	     with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
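/* E.g. a VEC_MERGE of the constant vectors [a0 a1] and [b0 b1] with
   selector 1 (bit 0 set) folds above to the vector [a0 b1].  */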
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s = NULL;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = rtx_mode_t (el, innermode);
	    unsigned char extend = wi::sign_mask (val);
	    int prec = wi::get_precision (val);

	    for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
	      / HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
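/* E.g. on a little-endian target, a SImode subreg at byte 4 of the DImode
   constant 0x0000000100000002 unpacks the inner value to the byte array
   02 00 00 00 01 00 00 00, selects bytes 4..7 and repacks them as
   (const_int 1).  */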
6009 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6010 Return 0 if no simplifications are possible. */
6012 simplify_subreg (machine_mode outermode
, rtx op
,
6013 machine_mode innermode
, unsigned int byte
)
6015 /* Little bit of sanity checking. */
6016 gcc_assert (innermode
!= VOIDmode
);
6017 gcc_assert (outermode
!= VOIDmode
);
6018 gcc_assert (innermode
!= BLKmode
);
6019 gcc_assert (outermode
!= BLKmode
);
6021 gcc_assert (GET_MODE (op
) == innermode
6022 || GET_MODE (op
) == VOIDmode
);
6024 if ((byte
% GET_MODE_SIZE (outermode
)) != 0)
6027 if (byte
>= GET_MODE_SIZE (innermode
))
6030 if (outermode
== innermode
&& !byte
)
6033 if (CONST_SCALAR_INT_P (op
)
6034 || CONST_DOUBLE_AS_FLOAT_P (op
)
6035 || GET_CODE (op
) == CONST_FIXED
6036 || GET_CODE (op
) == CONST_VECTOR
)
6037 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
6039 /* Changing mode twice with SUBREG => just change it once,
6040 or not at all if changing back op starting mode. */
6041 if (GET_CODE (op
) == SUBREG
)
6043 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
6044 int final_offset
= byte
+ SUBREG_BYTE (op
);
6047 if (outermode
== innermostmode
6048 && byte
== 0 && SUBREG_BYTE (op
) == 0)
6049 return SUBREG_REG (op
);
6051 /* The SUBREG_BYTE represents offset, as if the value were stored
6052 in memory. Irritating exception is paradoxical subreg, where
6053 we define SUBREG_BYTE to be 0. On big endian machines, this
6054 value should be negative. For a moment, undo this exception. */
6055 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
6057 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
6058 if (WORDS_BIG_ENDIAN
)
6059 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
6060 if (BYTES_BIG_ENDIAN
)
6061 final_offset
+= difference
% UNITS_PER_WORD
;
6063 if (SUBREG_BYTE (op
) == 0
6064 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
6066 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
6067 if (WORDS_BIG_ENDIAN
)
6068 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
6069 if (BYTES_BIG_ENDIAN
)
6070 final_offset
+= difference
% UNITS_PER_WORD
;
6073 /* See whether resulting subreg will be paradoxical. */
6074 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
6076 /* In nonparadoxical subregs we can't handle negative offsets. */
6077 if (final_offset
< 0)
6079 /* Bail out in case resulting subreg would be incorrect. */
6080 if (final_offset
% GET_MODE_SIZE (outermode
)
6081 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
6087 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
6089 /* In paradoxical subreg, see if we are still looking on lower part.
6090 If so, our SUBREG_BYTE will be 0. */
6091 if (WORDS_BIG_ENDIAN
)
6092 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
6093 if (BYTES_BIG_ENDIAN
)
6094 offset
+= difference
% UNITS_PER_WORD
;
6095 if (offset
== final_offset
)
6101 /* Recurse for further possible simplifications. */
6102 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
6106 if (validate_subreg (outermode
, innermostmode
,
6107 SUBREG_REG (op
), final_offset
))
6109 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
6110 if (SUBREG_PROMOTED_VAR_P (op
)
6111 && SUBREG_PROMOTED_SIGN (op
) >= 0
6112 && GET_MODE_CLASS (outermode
) == MODE_INT
6113 && IN_RANGE (GET_MODE_SIZE (outermode
),
6114 GET_MODE_SIZE (innermode
),
6115 GET_MODE_SIZE (innermostmode
))
6116 && subreg_lowpart_p (newx
))
6118 SUBREG_PROMOTED_VAR_P (newx
) = 1;
6119 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
6126 /* SUBREG of a hard register => just change the register number
6127 and/or mode. If the hard register is not valid in that mode,
6128 suppress this simplification. If the hard register is the stack,
6129 frame, or argument pointer, leave this as a SUBREG. */
6131 if (REG_P (op
) && HARD_REGISTER_P (op
))
6133 unsigned int regno
, final_regno
;
6136 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
6137 if (HARD_REGISTER_NUM_P (final_regno
))
6140 int final_offset
= byte
;
6142 /* Adjust offset for paradoxical subregs. */
6144 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
6146 int difference
= (GET_MODE_SIZE (innermode
)
6147 - GET_MODE_SIZE (outermode
));
6148 if (WORDS_BIG_ENDIAN
)
6149 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
6150 if (BYTES_BIG_ENDIAN
)
6151 final_offset
+= difference
% UNITS_PER_WORD
;
6154 x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, final_offset
);
6156 /* Propagate original regno. We don't have any way to specify
6157 the offset inside original regno, so do so only for lowpart.
6158 The information is used only by alias analysis that can not
6159 grog partial register anyway. */
6161 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
6162 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
6167 /* If we have a SUBREG of a register that we are replacing and we are
6168 replacing it with a MEM, make a new MEM and try replacing the
6169 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6170 or if we would be widening it. */
6173 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
6174 /* Allow splitting of volatile memory references in case we don't
6175 have instruction to move the whole thing. */
6176 && (! MEM_VOLATILE_P (op
)
6177 || ! have_insn_for (SET
, innermode
))
6178 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
6179 return adjust_address_nv (op
, outermode
, byte
);
6181 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6183 if (GET_CODE (op
) == CONCAT
6184 || GET_CODE (op
) == VEC_CONCAT
)
6186 unsigned int part_size
, final_offset
;
6189 machine_mode part_mode
= GET_MODE (XEXP (op
, 0));
6190 if (part_mode
== VOIDmode
)
6191 part_mode
= GET_MODE_INNER (GET_MODE (op
));
6192 part_size
= GET_MODE_SIZE (part_mode
);
6193 if (byte
< part_size
)
6195 part
= XEXP (op
, 0);
6196 final_offset
= byte
;
6200 part
= XEXP (op
, 1);
6201 final_offset
= byte
- part_size
;
6204 if (final_offset
+ GET_MODE_SIZE (outermode
) > part_size
)
6207 part_mode
= GET_MODE (part
);
6208 if (part_mode
== VOIDmode
)
6209 part_mode
= GET_MODE_INNER (GET_MODE (op
));
6210 res
= simplify_subreg (outermode
, part
, part_mode
, final_offset
);
6213 if (validate_subreg (outermode
, part_mode
, part
, final_offset
))
6214 return gen_rtx_SUBREG (outermode
, part
, final_offset
);

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
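
  /* Illustrative example (not from the original sources): on a
     little-endian target, (subreg:SI (zero_extend:DI (reg:SI R)) 4)
     reads bits [32, 63] of the zero-extended value, which are all
     known to be zero, so it folds to (const_int 0).  */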

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
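      /* Illustrative example (not from the original sources): if OP is
	 (zero_extend:SI (reg:HI R)) and OUTERMODE is HImode, the
	 truncation above folds TEM to (reg:HI R).  */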
      if (tem)
	return tem;
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
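
/* Illustrative usage of simplify_gen_subreg (not from the original
   sources): a pass holding a DImode value X can request its low
   SImode half with simplify_gen_subreg (SImode, X, DImode, 0) on a
   little-endian target; the result is either a simplified rtx, a
   fresh (subreg:SI X 0), or NULL_RTX when no valid subreg exists.  */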

/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
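
/* Illustrative usage (not from the original sources):
   lowpart_subreg (SImode, X, DImode) computes the lowpart offset for
   the target's endianness itself (byte 0 on little-endian, byte 4 on
   big-endian), so callers need not hard-code the offset passed to
   simplify_gen_subreg.  */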

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass-dependent state to be provided to these
	   routines and add simplifications based on the pass-dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))