/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
29 #include "fold-const.h"
34 #include "insn-config.h"
36 #include "insn-codes.h"
45 #include "diagnostic-core.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
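
/* For example, HWI_SIGN_EXTEND (-2) evaluates to (HOST_WIDE_INT) -1 and
   HWI_SIGN_EXTEND (2) evaluates to (HOST_WIDE_INT) 0, i.e. the high word
   implied by the sign of the low word.  */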

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
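
/* For instance, with QImode the only value mode_signbit_p accepts is 0x80,
   the sign bit of an 8-bit integer.  */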

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
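
/* For example, simplify_gen_binary (PLUS, SImode, reg, const1_rtx) first
   tries simplify_binary_operation; if nothing folds it returns
   (plus:SI reg (const_int 1)), with any constant operand placed second.  */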

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
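
/* For example, an SFmode MEM whose address is a constant-pool SYMBOL_REF is
   replaced here by the pooled constant itself, so that later folding can see
   the actual value rather than the memory reference.  */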

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
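
/* For instance, replacing (reg:SI 100) with (const_int 0) in
   (plus:SI (reg:SI 100) (reg:SI 101)) yields just (reg:SI 101), because the
   rebuilt PLUS is re-simplified on the way back up.  */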

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }
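
  /* E.g. (truncate:HI (zero_extend:DI (x:HI))) becomes x:HI itself,
     (truncate:QI (zero_extend:DI (x:HI))) becomes (truncate:QI (x:HI)),
     and (truncate:SI (zero_extend:DI (x:HI))) becomes
     (zero_extend:SI (x:HI)).  */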

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
      && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
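
/* As an example of the shift rules above, simplify_truncation turns
   (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) (const_int 3)))
   into (lshiftrt:QI (x:QI) (const_int 3)).  */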

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
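
/* For example, a FLOAT conversion from SImode to SFmode is exact whenever
   the operand is known to need no more bits than SFmode's 24-bit
   significand, e.g. after an AND with (const_int 0xffff).  */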
854 /* Perform some simplifications we can do even if the operands
857 simplify_unary_operation_1 (enum rtx_code code
, machine_mode mode
, rtx op
)
859 enum rtx_code reversed
;
865 /* (not (not X)) == X. */
866 if (GET_CODE (op
) == NOT
)
869 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
870 comparison is all ones. */
871 if (COMPARISON_P (op
)
872 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
873 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
874 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
875 XEXP (op
, 0), XEXP (op
, 1));
877 /* (not (plus X -1)) can become (neg X). */
878 if (GET_CODE (op
) == PLUS
879 && XEXP (op
, 1) == constm1_rtx
)
880 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
882 /* Similarly, (not (neg X)) is (plus X -1). */
883 if (GET_CODE (op
) == NEG
)
884 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
887 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
888 if (GET_CODE (op
) == XOR
889 && CONST_INT_P (XEXP (op
, 1))
890 && (temp
= simplify_unary_operation (NOT
, mode
,
891 XEXP (op
, 1), mode
)) != 0)
892 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
894 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
895 if (GET_CODE (op
) == PLUS
896 && CONST_INT_P (XEXP (op
, 1))
897 && mode_signbit_p (mode
, XEXP (op
, 1))
898 && (temp
= simplify_unary_operation (NOT
, mode
,
899 XEXP (op
, 1), mode
)) != 0)
900 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
903 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
904 operands other than 1, but that is not valid. We could do a
905 similar simplification for (not (lshiftrt C X)) where C is
906 just the sign bit, but this doesn't seem common enough to
908 if (GET_CODE (op
) == ASHIFT
909 && XEXP (op
, 0) == const1_rtx
)
911 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
912 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
915 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
916 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
917 so we can perform the above simplification. */
918 if (STORE_FLAG_VALUE
== -1
919 && GET_CODE (op
) == ASHIFTRT
920 && CONST_INT_P (XEXP (op
, 1))
921 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
922 return simplify_gen_relational (GE
, mode
, VOIDmode
,
923 XEXP (op
, 0), const0_rtx
);
926 if (GET_CODE (op
) == SUBREG
927 && subreg_lowpart_p (op
)
928 && (GET_MODE_SIZE (GET_MODE (op
))
929 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
930 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
931 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
933 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
936 x
= gen_rtx_ROTATE (inner_mode
,
937 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
939 XEXP (SUBREG_REG (op
), 1));
940 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
945 /* Apply De Morgan's laws to reduce number of patterns for machines
946 with negating logical insns (and-not, nand, etc.). If result has
947 only one NOT, put it first, since that is how the patterns are
949 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
951 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
952 machine_mode op_mode
;
954 op_mode
= GET_MODE (in1
);
955 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
957 op_mode
= GET_MODE (in2
);
958 if (op_mode
== VOIDmode
)
960 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
962 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
963 std::swap (in1
, in2
);
965 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
969 /* (not (bswap x)) -> (bswap (not x)). */
970 if (GET_CODE (op
) == BSWAP
)
972 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
973 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
978 /* (neg (neg X)) == X. */
979 if (GET_CODE (op
) == NEG
)
982 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
983 If comparison is not reversible use
985 if (GET_CODE (op
) == IF_THEN_ELSE
)
987 rtx cond
= XEXP (op
, 0);
988 rtx true_rtx
= XEXP (op
, 1);
989 rtx false_rtx
= XEXP (op
, 2);
991 if ((GET_CODE (true_rtx
) == NEG
992 && rtx_equal_p (XEXP (true_rtx
, 0), false_rtx
))
993 || (GET_CODE (false_rtx
) == NEG
994 && rtx_equal_p (XEXP (false_rtx
, 0), true_rtx
)))
996 if (reversed_comparison_code (cond
, NULL_RTX
) != UNKNOWN
)
997 temp
= reversed_comparison (cond
, mode
);
1001 std::swap (true_rtx
, false_rtx
);
1003 return simplify_gen_ternary (IF_THEN_ELSE
, mode
,
1004 mode
, temp
, true_rtx
, false_rtx
);
1008 /* (neg (plus X 1)) can become (not X). */
1009 if (GET_CODE (op
) == PLUS
1010 && XEXP (op
, 1) == const1_rtx
)
1011 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
1013 /* Similarly, (neg (not X)) is (plus X 1). */
1014 if (GET_CODE (op
) == NOT
)
1015 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
1018 /* (neg (minus X Y)) can become (minus Y X). This transformation
1019 isn't safe for modes with signed zeros, since if X and Y are
1020 both +0, (minus Y X) is the same as (minus X Y). If the
1021 rounding mode is towards +infinity (or -infinity) then the two
1022 expressions will be rounded differently. */
1023 if (GET_CODE (op
) == MINUS
1024 && !HONOR_SIGNED_ZEROS (mode
)
1025 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1026 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
1028 if (GET_CODE (op
) == PLUS
1029 && !HONOR_SIGNED_ZEROS (mode
)
1030 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1032 /* (neg (plus A C)) is simplified to (minus -C A). */
1033 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
1034 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
1036 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
1038 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
1041 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1042 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1043 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1046 /* (neg (mult A B)) becomes (mult A (neg B)).
1047 This works even for floating-point values. */
1048 if (GET_CODE (op
) == MULT
1049 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1051 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1052 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1055 /* NEG commutes with ASHIFT since it is multiplication. Only do
1056 this if we can then eliminate the NEG (e.g., if the operand
1058 if (GET_CODE (op
) == ASHIFT
)
1060 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1062 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1065 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1066 C is equal to the width of MODE minus 1. */
1067 if (GET_CODE (op
) == ASHIFTRT
1068 && CONST_INT_P (XEXP (op
, 1))
1069 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
1070 return simplify_gen_binary (LSHIFTRT
, mode
,
1071 XEXP (op
, 0), XEXP (op
, 1));
1073 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1074 C is equal to the width of MODE minus 1. */
1075 if (GET_CODE (op
) == LSHIFTRT
1076 && CONST_INT_P (XEXP (op
, 1))
1077 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
1078 return simplify_gen_binary (ASHIFTRT
, mode
,
1079 XEXP (op
, 0), XEXP (op
, 1));
1081 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1082 if (GET_CODE (op
) == XOR
1083 && XEXP (op
, 1) == const1_rtx
1084 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1085 return plus_constant (mode
, XEXP (op
, 0), -1);
1087 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1088 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1089 if (GET_CODE (op
) == LT
1090 && XEXP (op
, 1) == const0_rtx
1091 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
1093 machine_mode inner
= GET_MODE (XEXP (op
, 0));
1094 int isize
= GET_MODE_PRECISION (inner
);
1095 if (STORE_FLAG_VALUE
== 1)
1097 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1098 GEN_INT (isize
- 1));
1101 if (GET_MODE_PRECISION (mode
) > isize
)
1102 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
1103 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1105 else if (STORE_FLAG_VALUE
== -1)
1107 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1108 GEN_INT (isize
- 1));
1111 if (GET_MODE_PRECISION (mode
) > isize
)
1112 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
1113 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1119 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1120 with the umulXi3_highpart patterns. */
1121 if (GET_CODE (op
) == LSHIFTRT
1122 && GET_CODE (XEXP (op
, 0)) == MULT
)
1125 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1127 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1129 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1133 /* We can't handle truncation to a partial integer mode here
1134 because we don't know the real bitsize of the partial
1139 if (GET_MODE (op
) != VOIDmode
)
1141 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1146 /* If we know that the value is already truncated, we can
1147 replace the TRUNCATE with a SUBREG. */
1148 if (GET_MODE_NUNITS (mode
) == 1
1149 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1150 || truncated_to_mode (mode
, op
)))
1152 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1157 /* A truncate of a comparison can be replaced with a subreg if
1158 STORE_FLAG_VALUE permits. This is like the previous test,
1159 but it works even if the comparison is done in a mode larger
1160 than HOST_BITS_PER_WIDE_INT. */
1161 if (HWI_COMPUTABLE_MODE_P (mode
)
1162 && COMPARISON_P (op
)
1163 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1165 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1170 /* A truncate of a memory is just loading the low part of the memory
1171 if we are not changing the meaning of the address. */
1172 if (GET_CODE (op
) == MEM
1173 && !VECTOR_MODE_P (mode
)
1174 && !MEM_VOLATILE_P (op
)
1175 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1177 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1184 case FLOAT_TRUNCATE
:
1185 if (DECIMAL_FLOAT_MODE_P (mode
))
1188 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1189 if (GET_CODE (op
) == FLOAT_EXTEND
1190 && GET_MODE (XEXP (op
, 0)) == mode
)
1191 return XEXP (op
, 0);
1193 /* (float_truncate:SF (float_truncate:DF foo:XF))
1194 = (float_truncate:SF foo:XF).
1195 This may eliminate double rounding, so it is unsafe.
1197 (float_truncate:SF (float_extend:XF foo:DF))
1198 = (float_truncate:SF foo:DF).
1200 (float_truncate:DF (float_extend:XF foo:SF))
1201 = (float_extend:DF foo:SF). */
1202 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1203 && flag_unsafe_math_optimizations
)
1204 || GET_CODE (op
) == FLOAT_EXTEND
)
1205 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
1207 > GET_MODE_SIZE (mode
)
1208 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1210 XEXP (op
, 0), mode
);
1212 /* (float_truncate (float x)) is (float x) */
1213 if ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1214 && (flag_unsafe_math_optimizations
1215 || exact_int_to_float_conversion_p (op
)))
1216 return simplify_gen_unary (GET_CODE (op
), mode
,
1218 GET_MODE (XEXP (op
, 0)));
1220 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1221 (OP:SF foo:SF) if OP is NEG or ABS. */
1222 if ((GET_CODE (op
) == ABS
1223 || GET_CODE (op
) == NEG
)
1224 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1225 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1226 return simplify_gen_unary (GET_CODE (op
), mode
,
1227 XEXP (XEXP (op
, 0), 0), mode
);
1229 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1230 is (float_truncate:SF x). */
1231 if (GET_CODE (op
) == SUBREG
1232 && subreg_lowpart_p (op
)
1233 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1234 return SUBREG_REG (op
);
1238 if (DECIMAL_FLOAT_MODE_P (mode
))
1241 /* (float_extend (float_extend x)) is (float_extend x)
1243 (float_extend (float x)) is (float x) assuming that double
1244 rounding can't happen.
1246 if (GET_CODE (op
) == FLOAT_EXTEND
1247 || ((GET_CODE (op
) == FLOAT
|| GET_CODE (op
) == UNSIGNED_FLOAT
)
1248 && exact_int_to_float_conversion_p (op
)))
1249 return simplify_gen_unary (GET_CODE (op
), mode
,
1251 GET_MODE (XEXP (op
, 0)));
1256 /* (abs (neg <foo>)) -> (abs <foo>) */
1257 if (GET_CODE (op
) == NEG
)
1258 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1259 GET_MODE (XEXP (op
, 0)));
1261 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1263 if (GET_MODE (op
) == VOIDmode
)
1266 /* If operand is something known to be positive, ignore the ABS. */
1267 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1268 || val_signbit_known_clear_p (GET_MODE (op
),
1269 nonzero_bits (op
, GET_MODE (op
))))
1272 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1273 if (num_sign_bit_copies (op
, mode
) == GET_MODE_PRECISION (mode
))
1274 return gen_rtx_NEG (mode
, op
);
1279 /* (ffs (*_extend <X>)) = (ffs <X>) */
1280 if (GET_CODE (op
) == SIGN_EXTEND
1281 || GET_CODE (op
) == ZERO_EXTEND
)
1282 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1283 GET_MODE (XEXP (op
, 0)));
1287 switch (GET_CODE (op
))
1291 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1292 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1293 GET_MODE (XEXP (op
, 0)));
1297 /* Rotations don't affect popcount. */
1298 if (!side_effects_p (XEXP (op
, 1)))
1299 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1300 GET_MODE (XEXP (op
, 0)));
1309 switch (GET_CODE (op
))
1315 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1316 GET_MODE (XEXP (op
, 0)));
1320 /* Rotations don't affect parity. */
1321 if (!side_effects_p (XEXP (op
, 1)))
1322 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1323 GET_MODE (XEXP (op
, 0)));
1332 /* (bswap (bswap x)) -> x. */
1333 if (GET_CODE (op
) == BSWAP
)
1334 return XEXP (op
, 0);
1338 /* (float (sign_extend <X>)) = (float <X>). */
1339 if (GET_CODE (op
) == SIGN_EXTEND
)
1340 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1341 GET_MODE (XEXP (op
, 0)));
1345 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1346 becomes just the MINUS if its mode is MODE. This allows
1347 folding switch statements on machines using casesi (such as
1349 if (GET_CODE (op
) == TRUNCATE
1350 && GET_MODE (XEXP (op
, 0)) == mode
1351 && GET_CODE (XEXP (op
, 0)) == MINUS
1352 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1353 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1354 return XEXP (op
, 0);
1356 /* Extending a widening multiplication should be canonicalized to
1357 a wider widening multiplication. */
1358 if (GET_CODE (op
) == MULT
)
1360 rtx lhs
= XEXP (op
, 0);
1361 rtx rhs
= XEXP (op
, 1);
1362 enum rtx_code lcode
= GET_CODE (lhs
);
1363 enum rtx_code rcode
= GET_CODE (rhs
);
1365 /* Widening multiplies usually extend both operands, but sometimes
1366 they use a shift to extract a portion of a register. */
1367 if ((lcode
== SIGN_EXTEND
1368 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1369 && (rcode
== SIGN_EXTEND
1370 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1372 machine_mode lmode
= GET_MODE (lhs
);
1373 machine_mode rmode
= GET_MODE (rhs
);
1376 if (lcode
== ASHIFTRT
)
1377 /* Number of bits not shifted off the end. */
1378 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1379 else /* lcode == SIGN_EXTEND */
1380 /* Size of inner mode. */
1381 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1383 if (rcode
== ASHIFTRT
)
1384 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1385 else /* rcode == SIGN_EXTEND */
1386 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1388 /* We can only widen multiplies if the result is mathematiclly
1389 equivalent. I.e. if overflow was impossible. */
1390 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1391 return simplify_gen_binary
1393 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1394 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1398 /* Check for a sign extension of a subreg of a promoted
1399 variable, where the promotion is sign-extended, and the
1400 target mode is the same as the variable's promotion. */
1401 if (GET_CODE (op
) == SUBREG
1402 && SUBREG_PROMOTED_VAR_P (op
)
1403 && SUBREG_PROMOTED_SIGNED_P (op
)
1404 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1406 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1411 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1412 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1413 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1415 gcc_assert (GET_MODE_PRECISION (mode
)
1416 > GET_MODE_PRECISION (GET_MODE (op
)));
1417 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1418 GET_MODE (XEXP (op
, 0)));
1421 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1422 is (sign_extend:M (subreg:O <X>)) if there is mode with
1423 GET_MODE_BITSIZE (N) - I bits.
1424 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1425 is similarly (zero_extend:M (subreg:O <X>)). */
1426 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1427 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1428 && CONST_INT_P (XEXP (op
, 1))
1429 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1430 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1433 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1434 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1435 gcc_assert (GET_MODE_BITSIZE (mode
)
1436 > GET_MODE_BITSIZE (GET_MODE (op
)));
1437 if (tmode
!= BLKmode
)
1440 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1442 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1443 ? SIGN_EXTEND
: ZERO_EXTEND
,
1444 mode
, inner
, tmode
);
1448 #if defined(POINTERS_EXTEND_UNSIGNED)
1449 /* As we do not know which address space the pointer is referring to,
1450 we can do this only if the target does not support different pointer
1451 or address modes depending on the address space. */
1452 if (target_default_pointer_address_modes_p ()
1453 && ! POINTERS_EXTEND_UNSIGNED
1454 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1456 || (GET_CODE (op
) == SUBREG
1457 && REG_P (SUBREG_REG (op
))
1458 && REG_POINTER (SUBREG_REG (op
))
1459 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1460 && !targetm
.have_ptr_extend ())
1461 return convert_memory_address (Pmode
, op
);
1466 /* Check for a zero extension of a subreg of a promoted
1467 variable, where the promotion is zero-extended, and the
1468 target mode is the same as the variable's promotion. */
1469 if (GET_CODE (op
) == SUBREG
1470 && SUBREG_PROMOTED_VAR_P (op
)
1471 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1472 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1474 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1479 /* Extending a widening multiplication should be canonicalized to
1480 a wider widening multiplication. */
1481 if (GET_CODE (op
) == MULT
)
1483 rtx lhs
= XEXP (op
, 0);
1484 rtx rhs
= XEXP (op
, 1);
1485 enum rtx_code lcode
= GET_CODE (lhs
);
1486 enum rtx_code rcode
= GET_CODE (rhs
);
1488 /* Widening multiplies usually extend both operands, but sometimes
1489 they use a shift to extract a portion of a register. */
1490 if ((lcode
== ZERO_EXTEND
1491 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1492 && (rcode
== ZERO_EXTEND
1493 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1495 machine_mode lmode
= GET_MODE (lhs
);
1496 machine_mode rmode
= GET_MODE (rhs
);
1499 if (lcode
== LSHIFTRT
)
1500 /* Number of bits not shifted off the end. */
1501 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1502 else /* lcode == ZERO_EXTEND */
1503 /* Size of inner mode. */
1504 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1506 if (rcode
== LSHIFTRT
)
1507 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1508 else /* rcode == ZERO_EXTEND */
1509 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1511 /* We can only widen multiplies if the result is mathematiclly
1512 equivalent. I.e. if overflow was impossible. */
1513 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1514 return simplify_gen_binary
1516 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1517 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1521 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1522 if (GET_CODE (op
) == ZERO_EXTEND
)
1523 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1524 GET_MODE (XEXP (op
, 0)));
1526 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1527 is (zero_extend:M (subreg:O <X>)) if there is mode with
1528 GET_MODE_PRECISION (N) - I bits. */
1529 if (GET_CODE (op
) == LSHIFTRT
1530 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1531 && CONST_INT_P (XEXP (op
, 1))
1532 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1533 && GET_MODE_PRECISION (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1536 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op
))
1537 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1538 if (tmode
!= BLKmode
)
1541 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1543 return simplify_gen_unary (ZERO_EXTEND
, mode
, inner
, tmode
);
1547 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1548 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1550 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1551 (and:SI (reg:SI) (const_int 63)). */
1552 if (GET_CODE (op
) == SUBREG
1553 && GET_MODE_PRECISION (GET_MODE (op
))
1554 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1555 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1556 <= HOST_BITS_PER_WIDE_INT
1557 && GET_MODE_PRECISION (mode
)
1558 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1559 && subreg_lowpart_p (op
)
1560 && (nonzero_bits (SUBREG_REG (op
), GET_MODE (SUBREG_REG (op
)))
1561 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1563 if (GET_MODE_PRECISION (mode
)
1564 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
))))
1565 return SUBREG_REG (op
);
1566 return simplify_gen_unary (ZERO_EXTEND
, mode
, SUBREG_REG (op
),
1567 GET_MODE (SUBREG_REG (op
)));
1570 #if defined(POINTERS_EXTEND_UNSIGNED)
1571 /* As we do not know which address space the pointer is referring to,
1572 we can do this only if the target does not support different pointer
1573 or address modes depending on the address space. */
1574 if (target_default_pointer_address_modes_p ()
1575 && POINTERS_EXTEND_UNSIGNED
> 0
1576 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1578 || (GET_CODE (op
) == SUBREG
1579 && REG_P (SUBREG_REG (op
))
1580 && REG_POINTER (SUBREG_REG (op
))
1581 && GET_MODE (SUBREG_REG (op
)) == Pmode
))
1582 && !targetm
.have_ptr_extend ())
1583 return convert_memory_address (Pmode
, op
);
1594 /* Try to compute the value of a unary operation CODE whose output mode is to
1595 be MODE with input operand OP whose mode was originally OP_MODE.
1596 Return zero if the value cannot be computed. */
1598 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1599 rtx op
, machine_mode op_mode
)
1601 unsigned int width
= GET_MODE_PRECISION (mode
);
1603 if (code
== VEC_DUPLICATE
)
1605 gcc_assert (VECTOR_MODE_P (mode
));
1606 if (GET_MODE (op
) != VOIDmode
)
1608 if (!VECTOR_MODE_P (GET_MODE (op
)))
1609 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1611 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1614 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
)
1615 || GET_CODE (op
) == CONST_VECTOR
)
1617 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
1618 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1619 rtvec v
= rtvec_alloc (n_elts
);
1622 if (GET_CODE (op
) != CONST_VECTOR
)
1623 for (i
= 0; i
< n_elts
; i
++)
1624 RTVEC_ELT (v
, i
) = op
;
1627 machine_mode inmode
= GET_MODE (op
);
1628 int in_elt_size
= GET_MODE_UNIT_SIZE (inmode
);
1629 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1631 gcc_assert (in_n_elts
< n_elts
);
1632 gcc_assert ((n_elts
% in_n_elts
) == 0);
1633 for (i
= 0; i
< n_elts
; i
++)
1634 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1636 return gen_rtx_CONST_VECTOR (mode
, v
);
1640 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1642 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
1643 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1644 machine_mode opmode
= GET_MODE (op
);
1645 int op_elt_size
= GET_MODE_UNIT_SIZE (opmode
);
1646 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1647 rtvec v
= rtvec_alloc (n_elts
);
1650 gcc_assert (op_n_elts
== n_elts
);
1651 for (i
= 0; i
< n_elts
; i
++)
1653 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1654 CONST_VECTOR_ELT (op
, i
),
1655 GET_MODE_INNER (opmode
));
1658 RTVEC_ELT (v
, i
) = x
;
1660 return gen_rtx_CONST_VECTOR (mode
, v
);
1663 /* The order of these tests is critical so that, for example, we don't
1664 check the wrong mode (input vs. output) for a conversion operation,
1665 such as FIX. At some point, this should be simplified. */
1667 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1671 if (op_mode
== VOIDmode
)
1673 /* CONST_INT have VOIDmode as the mode. We assume that all
1674 the bits of the constant are significant, though, this is
1675 a dangerous assumption as many times CONST_INTs are
1676 created and used with garbage in the bits outside of the
1677 precision of the implied mode of the const_int. */
1678 op_mode
= MAX_MODE_INT
;
1681 real_from_integer (&d
, mode
, std::make_pair (op
, op_mode
), SIGNED
);
1682 d
= real_value_truncate (mode
, d
);
1683 return const_double_from_real_value (d
, mode
);
1685 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1689 if (op_mode
== VOIDmode
)
1691 /* CONST_INT have VOIDmode as the mode. We assume that all
1692 the bits of the constant are significant, though, this is
1693 a dangerous assumption as many times CONST_INTs are
1694 created and used with garbage in the bits outside of the
1695 precision of the implied mode of the const_int. */
1696 op_mode
= MAX_MODE_INT
;
1699 real_from_integer (&d
, mode
, std::make_pair (op
, op_mode
), UNSIGNED
);
1700 d
= real_value_truncate (mode
, d
);
1701 return const_double_from_real_value (d
, mode
);
1704 if (CONST_SCALAR_INT_P (op
) && width
> 0)
1707 machine_mode imode
= op_mode
== VOIDmode
? mode
: op_mode
;
1708 rtx_mode_t op0
= std::make_pair (op
, imode
);
1711 #if TARGET_SUPPORTS_WIDE_INT == 0
1712 /* This assert keeps the simplification from producing a result
1713 that cannot be represented in a CONST_DOUBLE but a lot of
1714 upstream callers expect that this function never fails to
1715 simplify something and so you if you added this to the test
1716 above the code would die later anyway. If this assert
1717 happens, you just need to make the port support wide int. */
1718 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1724 result
= wi::bit_not (op0
);
1728 result
= wi::neg (op0
);
1732 result
= wi::abs (op0
);
1736 result
= wi::shwi (wi::ffs (op0
), mode
);
1740 if (wi::ne_p (op0
, 0))
1741 int_value
= wi::clz (op0
);
1742 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, int_value
))
1743 int_value
= GET_MODE_PRECISION (mode
);
1744 result
= wi::shwi (int_value
, mode
);
1748 result
= wi::shwi (wi::clrsb (op0
), mode
);
1752 if (wi::ne_p (op0
, 0))
1753 int_value
= wi::ctz (op0
);
1754 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, int_value
))
1755 int_value
= GET_MODE_PRECISION (mode
);
1756 result
= wi::shwi (int_value
, mode
);
1760 result
= wi::shwi (wi::popcount (op0
), mode
);
1764 result
= wi::shwi (wi::parity (op0
), mode
);
1768 result
= wide_int (op0
).bswap ();
1773 result
= wide_int::from (op0
, width
, UNSIGNED
);
1777 result
= wide_int::from (op0
, width
, SIGNED
);
1785 return immed_wide_int_const (result
, mode
);
1788 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1789 && SCALAR_FLOAT_MODE_P (mode
)
1790 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1792 REAL_VALUE_TYPE d
= *CONST_DOUBLE_REAL_VALUE (op
);
1798 d
= real_value_abs (&d
);
1801 d
= real_value_negate (&d
);
1803 case FLOAT_TRUNCATE
:
1804 d
= real_value_truncate (mode
, d
);
1807 /* All this does is change the mode, unless changing
1809 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1810 real_convert (&d
, mode
, &d
);
1813 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1820 real_to_target (tmp
, &d
, GET_MODE (op
));
1821 for (i
= 0; i
< 4; i
++)
1823 real_from_target (&d
, tmp
, mode
);
1829 return const_double_from_real_value (d
, mode
);
1831 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1832 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1833 && GET_MODE_CLASS (mode
) == MODE_INT
1836 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1837 operators are intentionally left unspecified (to ease implementation
1838 by target backends), for consistency, this routine implements the
1839 same semantics for constant folding as used by the middle-end. */
1841 /* This was formerly used only for non-IEEE float.
1842 eggert@twinsun.com says it is safe for IEEE also. */
1844 const REAL_VALUE_TYPE
*x
= CONST_DOUBLE_REAL_VALUE (op
);
1845 wide_int wmax
, wmin
;
1846 /* This is part of the abi to real_to_integer, but we check
1847 things before making this call. */
1853 if (REAL_VALUE_ISNAN (*x
))
1856 /* Test against the signed upper bound. */
1857 wmax
= wi::max_value (width
, SIGNED
);
1858 real_from_integer (&t
, VOIDmode
, wmax
, SIGNED
);
1859 if (real_less (&t
, x
))
1860 return immed_wide_int_const (wmax
, mode
);
1862 /* Test against the signed lower bound. */
1863 wmin
= wi::min_value (width
, SIGNED
);
1864 real_from_integer (&t
, VOIDmode
, wmin
, SIGNED
);
1865 if (real_less (x
, &t
))
1866 return immed_wide_int_const (wmin
, mode
);
1868 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),
1872 if (REAL_VALUE_ISNAN (*x
) || REAL_VALUE_NEGATIVE (*x
))
1875 /* Test against the unsigned upper bound. */
1876 wmax
= wi::max_value (width
, UNSIGNED
);
1877 real_from_integer (&t
, VOIDmode
, wmax
, UNSIGNED
);
1878 if (real_less (&t
, x
))
1879 return immed_wide_int_const (wmax
, mode
);
1881 return immed_wide_int_const (real_to_integer (x
, &fail
, width
),

/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
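
/* For example, (plus:SI (plus:SI a (const_int 4)) b) is canonicalized here
   to (plus:SI (plus:SI a b) (const_int 4)), giving nested constant operands
   a chance to fold together.  */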
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
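/* Usage sketch (illustrative, not from the original sources): a caller such
   as simplify_gen_binary (PLUS, SImode, const1_rtx, GEN_INT (2)) reaches this
   routine and gets (const_int 3) back from the constant folder before the
   non-constant cases below are ever considered.  */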
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
          wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = std::make_pair (XEXP (lhs, 1), mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = std::make_pair (XEXP (rhs, 1), mode);
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                            GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + coeff1, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return (set_src_cost (tem, mode, speed)
                      <= set_src_cost (orig, mode, speed) ? tem : 0);
            }
        }
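      /* Worked example for the distribution above (illustrative, not from
         the original sources): (plus (mult x (const_int 3)) x) has
         coeff0 = 3 and coeff1 = 1, so it becomes (mult x (const_int 4))
         provided the new form is no more expensive than the original.
         Likewise (plus (ashift x 2) x) becomes (mult x (const_int 5)).  */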
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == XOR
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
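    /* Illustrative example for the sign-bit rule above (not from the
       original sources): in QImode, where the sign bit is 0x80,
       (plus (xor x C1) C2) with C2 equal to the sign bit folds to
       (xor x (C1 ^ C2)); with C1 = 0x12 the result mask is 0x92, since
       adding the sign bit is the same as XORing it in.  */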
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
            return xop00;

          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
          wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = std::make_pair (XEXP (lhs, 1), mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
            {
              negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                               GET_MODE_PRECISION (mode));
              negcoeff1 = -negcoeff1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return (set_src_cost (tem, mode, speed)
                      <= set_src_cost (orig, mode, speed) ? tem : 0);
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
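    /* Illustrative example for the MINUS case above (not from the original
       sources): the rule that canonicalizes subtraction of a constant turns
       (minus (reg:SI r) (const_int 4)) into (plus (reg:SI r) (const_int -4)),
       so a relocatable or register operand keeps a positive coefficient.  */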
2397 if (trueop1
== constm1_rtx
)
2398 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2400 if (GET_CODE (op0
) == NEG
)
2402 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2403 /* If op1 is a MULT as well and simplify_unary_operation
2404 just moved the NEG to the second operand, simplify_gen_binary
2405 below could through simplify_associative_operation move
2406 the NEG around again and recurse endlessly. */
2408 && GET_CODE (op1
) == MULT
2409 && GET_CODE (temp
) == MULT
2410 && XEXP (op1
, 0) == XEXP (temp
, 0)
2411 && GET_CODE (XEXP (temp
, 1)) == NEG
2412 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2415 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2417 if (GET_CODE (op1
) == NEG
)
2419 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2420 /* If op0 is a MULT as well and simplify_unary_operation
2421 just moved the NEG to the second operand, simplify_gen_binary
2422 below could through simplify_associative_operation move
2423 the NEG around again and recurse endlessly. */
2425 && GET_CODE (op0
) == MULT
2426 && GET_CODE (temp
) == MULT
2427 && XEXP (op0
, 0) == XEXP (temp
, 0)
2428 && GET_CODE (XEXP (temp
, 1)) == NEG
2429 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2432 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2435 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2436 x is NaN, since x * 0 is then also NaN. Nor is it valid
2437 when the mode has signed zeros, since multiplying a negative
2438 number by 0 will give -0, not 0. */
2439 if (!HONOR_NANS (mode
)
2440 && !HONOR_SIGNED_ZEROS (mode
)
2441 && trueop1
== CONST0_RTX (mode
)
2442 && ! side_effects_p (op0
))
2445 /* In IEEE floating point, x*1 is not equivalent to x for
2447 if (!HONOR_SNANS (mode
)
2448 && trueop1
== CONST1_RTX (mode
))
2451 /* Convert multiply by constant power of two into shift. */
2452 if (CONST_SCALAR_INT_P (trueop1
))
2454 val
= wi::exact_log2 (std::make_pair (trueop1
, mode
));
2456 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2459 /* x*2 is x+x and x*(-1) is -x */
2460 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2461 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2462 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2463 && GET_MODE (op0
) == mode
)
2465 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
2467 if (real_equal (d1
, &dconst2
))
2468 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2470 if (!HONOR_SNANS (mode
)
2471 && real_equal (d1
, &dconstm1
))
2472 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2475 /* Optimize -x * -x as x * x. */
2476 if (FLOAT_MODE_P (mode
)
2477 && GET_CODE (op0
) == NEG
2478 && GET_CODE (op1
) == NEG
2479 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2480 && !side_effects_p (XEXP (op0
, 0)))
2481 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2483 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2484 if (SCALAR_FLOAT_MODE_P (mode
)
2485 && GET_CODE (op0
) == ABS
2486 && GET_CODE (op1
) == ABS
2487 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2488 && !side_effects_p (XEXP (op0
, 0)))
2489 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2491 /* Reassociate multiplication, but for floating point MULTs
2492 only when the user specifies unsafe math optimizations. */
2493 if (! FLOAT_MODE_P (mode
)
2494 || flag_unsafe_math_optimizations
)
2496 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2503 if (trueop1
== CONST0_RTX (mode
))
2505 if (INTEGRAL_MODE_P (mode
)
2506 && trueop1
== CONSTM1_RTX (mode
)
2507 && !side_effects_p (op0
))
2509 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2511 /* A | (~A) -> -1 */
2512 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2513 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2514 && ! side_effects_p (op0
)
2515 && SCALAR_INT_MODE_P (mode
))
2518 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2519 if (CONST_INT_P (op1
)
2520 && HWI_COMPUTABLE_MODE_P (mode
)
2521 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2522 && !side_effects_p (op0
))
2525 /* Canonicalize (X & C1) | C2. */
2526 if (GET_CODE (op0
) == AND
2527 && CONST_INT_P (trueop1
)
2528 && CONST_INT_P (XEXP (op0
, 1)))
2530 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2531 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2532 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2534 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2536 && !side_effects_p (XEXP (op0
, 0)))
2539 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2540 if (((c1
|c2
) & mask
) == mask
)
2541 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2543 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2544 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2546 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2547 gen_int_mode (c1
& ~c2
, mode
));
2548 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2552 /* Convert (A & B) | A to A. */
2553 if (GET_CODE (op0
) == AND
2554 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2555 || rtx_equal_p (XEXP (op0
, 1), op1
))
2556 && ! side_effects_p (XEXP (op0
, 0))
2557 && ! side_effects_p (XEXP (op0
, 1)))
2560 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2561 mode size to (rotate A CX). */
2563 if (GET_CODE (op1
) == ASHIFT
2564 || GET_CODE (op1
) == SUBREG
)
2575 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2576 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2577 && CONST_INT_P (XEXP (opleft
, 1))
2578 && CONST_INT_P (XEXP (opright
, 1))
2579 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2580 == GET_MODE_PRECISION (mode
)))
2581 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2583 /* Same, but for ashift that has been "simplified" to a wider mode
2584 by simplify_shift_const. */
2586 if (GET_CODE (opleft
) == SUBREG
2587 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2588 && GET_CODE (opright
) == LSHIFTRT
2589 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2590 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2591 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2592 && (GET_MODE_SIZE (GET_MODE (opleft
))
2593 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2594 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2595 SUBREG_REG (XEXP (opright
, 0)))
2596 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2597 && CONST_INT_P (XEXP (opright
, 1))
2598 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2599 == GET_MODE_PRECISION (mode
)))
2600 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2601 XEXP (SUBREG_REG (opleft
), 1));
2603 /* If we have (ior (and (X C1) C2)), simplify this by making
2604 C1 as small as possible if C1 actually changes. */
2605 if (CONST_INT_P (op1
)
2606 && (HWI_COMPUTABLE_MODE_P (mode
)
2607 || INTVAL (op1
) > 0)
2608 && GET_CODE (op0
) == AND
2609 && CONST_INT_P (XEXP (op0
, 1))
2610 && CONST_INT_P (op1
)
2611 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2613 rtx tmp
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2614 gen_int_mode (UINTVAL (XEXP (op0
, 1))
2617 return simplify_gen_binary (IOR
, mode
, tmp
, op1
);
2620 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2621 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2622 the PLUS does not affect any of the bits in OP1: then we can do
2623 the IOR as a PLUS and we can associate. This is valid if OP1
2624 can be safely shifted left C bits. */
2625 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2626 && GET_CODE (XEXP (op0
, 0)) == PLUS
2627 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2628 && CONST_INT_P (XEXP (op0
, 1))
2629 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2631 int count
= INTVAL (XEXP (op0
, 1));
2632 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2634 if (mask
>> count
== INTVAL (trueop1
)
2635 && trunc_int_for_mode (mask
, mode
) == mask
2636 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2637 return simplify_gen_binary (ASHIFTRT
, mode
,
2638 plus_constant (mode
, XEXP (op0
, 0),
2643 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2647 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2653 if (trueop1
== CONST0_RTX (mode
))
2655 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2656 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2657 if (rtx_equal_p (trueop0
, trueop1
)
2658 && ! side_effects_p (op0
)
2659 && GET_MODE_CLASS (mode
) != MODE_CC
)
2660 return CONST0_RTX (mode
);
2662 /* Canonicalize XOR of the most significant bit to PLUS. */
2663 if (CONST_SCALAR_INT_P (op1
)
2664 && mode_signbit_p (mode
, op1
))
2665 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2666 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2667 if (CONST_SCALAR_INT_P (op1
)
2668 && GET_CODE (op0
) == PLUS
2669 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2670 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2671 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2672 simplify_gen_binary (XOR
, mode
, op1
,
2675 /* If we are XORing two things that have no bits in common,
2676 convert them into an IOR. This helps to detect rotation encoded
2677 using those methods and possibly other simplifications. */
2679 if (HWI_COMPUTABLE_MODE_P (mode
)
2680 && (nonzero_bits (op0
, mode
)
2681 & nonzero_bits (op1
, mode
)) == 0)
2682 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2684 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2685 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2688 int num_negated
= 0;
2690 if (GET_CODE (op0
) == NOT
)
2691 num_negated
++, op0
= XEXP (op0
, 0);
2692 if (GET_CODE (op1
) == NOT
)
2693 num_negated
++, op1
= XEXP (op1
, 0);
2695 if (num_negated
== 2)
2696 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2697 else if (num_negated
== 1)
2698 return simplify_gen_unary (NOT
, mode
,
2699 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2703 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2704 correspond to a machine insn or result in further simplifications
2705 if B is a constant. */
2707 if (GET_CODE (op0
) == AND
2708 && rtx_equal_p (XEXP (op0
, 1), op1
)
2709 && ! side_effects_p (op1
))
2710 return simplify_gen_binary (AND
, mode
,
2711 simplify_gen_unary (NOT
, mode
,
2712 XEXP (op0
, 0), mode
),
2715 else if (GET_CODE (op0
) == AND
2716 && rtx_equal_p (XEXP (op0
, 0), op1
)
2717 && ! side_effects_p (op1
))
2718 return simplify_gen_binary (AND
, mode
,
2719 simplify_gen_unary (NOT
, mode
,
2720 XEXP (op0
, 1), mode
),
2723 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2724 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2725 out bits inverted twice and not set by C. Similarly, given
2726 (xor (and (xor A B) C) D), simplify without inverting C in
2727 the xor operand: (xor (and A C) (B&C)^D).
2729 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2730 && GET_CODE (XEXP (op0
, 0)) == XOR
2731 && CONST_INT_P (op1
)
2732 && CONST_INT_P (XEXP (op0
, 1))
2733 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2735 enum rtx_code op
= GET_CODE (op0
);
2736 rtx a
= XEXP (XEXP (op0
, 0), 0);
2737 rtx b
= XEXP (XEXP (op0
, 0), 1);
2738 rtx c
= XEXP (op0
, 1);
2740 HOST_WIDE_INT bval
= INTVAL (b
);
2741 HOST_WIDE_INT cval
= INTVAL (c
);
2742 HOST_WIDE_INT dval
= INTVAL (d
);
2743 HOST_WIDE_INT xcval
;
2750 return simplify_gen_binary (XOR
, mode
,
2751 simplify_gen_binary (op
, mode
, a
, c
),
2752 gen_int_mode ((bval
& xcval
) ^ dval
,
2756 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2757 we can transform like this:
2758 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2759 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2760 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2761 Attempt a few simplifications when B and C are both constants. */
2762 if (GET_CODE (op0
) == AND
2763 && CONST_INT_P (op1
)
2764 && CONST_INT_P (XEXP (op0
, 1)))
2766 rtx a
= XEXP (op0
, 0);
2767 rtx b
= XEXP (op0
, 1);
2769 HOST_WIDE_INT bval
= INTVAL (b
);
2770 HOST_WIDE_INT cval
= INTVAL (c
);
2772 /* Instead of computing ~A&C, we compute its negated value,
2773 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2774 optimize for sure. If it does not simplify, we still try
2775 to compute ~A&C below, but since that always allocates
2776 RTL, we don't try that before committing to returning a
2777 simplified expression. */
2778 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
2781 if ((~cval
& bval
) == 0)
2783 rtx na_c
= NULL_RTX
;
2785 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
2788 /* If ~A does not simplify, don't bother: we don't
2789 want to simplify 2 operations into 3, and if na_c
2790 were to simplify with na, n_na_c would have
2791 simplified as well. */
2792 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
2794 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
2797 /* Try to simplify ~A&C | ~B&C. */
2798 if (na_c
!= NULL_RTX
)
2799 return simplify_gen_binary (IOR
, mode
, na_c
,
2800 gen_int_mode (~bval
& cval
, mode
));
2804 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2805 if (n_na_c
== CONSTM1_RTX (mode
))
2807 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2808 gen_int_mode (~cval
& bval
,
2810 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2811 gen_int_mode (~bval
& cval
,
2817 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2818 comparison if STORE_FLAG_VALUE is 1. */
2819 if (STORE_FLAG_VALUE
== 1
2820 && trueop1
== const1_rtx
2821 && COMPARISON_P (op0
)
2822 && (reversed
= reversed_comparison (op0
, mode
)))
2825 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2826 is (lt foo (const_int 0)), so we can perform the above
2827 simplification if STORE_FLAG_VALUE is 1. */
2829 if (STORE_FLAG_VALUE
== 1
2830 && trueop1
== const1_rtx
2831 && GET_CODE (op0
) == LSHIFTRT
2832 && CONST_INT_P (XEXP (op0
, 1))
2833 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2834 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2836 /* (xor (comparison foo bar) (const_int sign-bit))
2837 when STORE_FLAG_VALUE is the sign bit. */
2838 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2839 && trueop1
== const_true_rtx
2840 && COMPARISON_P (op0
)
2841 && (reversed
= reversed_comparison (op0
, mode
)))
2844 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2848 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2854 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2856 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2858 if (HWI_COMPUTABLE_MODE_P (mode
))
2860 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2861 HOST_WIDE_INT nzop1
;
2862 if (CONST_INT_P (trueop1
))
2864 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2865 /* If we are turning off bits already known off in OP0, we need
2867 if ((nzop0
& ~val1
) == 0)
2870 nzop1
= nonzero_bits (trueop1
, mode
);
2871 /* If we are clearing all the nonzero bits, the result is zero. */
2872 if ((nzop1
& nzop0
) == 0
2873 && !side_effects_p (op0
) && !side_effects_p (op1
))
2874 return CONST0_RTX (mode
);
2876 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2877 && GET_MODE_CLASS (mode
) != MODE_CC
)
2880 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2881 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2882 && ! side_effects_p (op0
)
2883 && GET_MODE_CLASS (mode
) != MODE_CC
)
2884 return CONST0_RTX (mode
);
2886 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2887 there are no nonzero bits of C outside of X's mode. */
2888 if ((GET_CODE (op0
) == SIGN_EXTEND
2889 || GET_CODE (op0
) == ZERO_EXTEND
)
2890 && CONST_INT_P (trueop1
)
2891 && HWI_COMPUTABLE_MODE_P (mode
)
2892 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2893 & UINTVAL (trueop1
)) == 0)
2895 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2896 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2897 gen_int_mode (INTVAL (trueop1
),
2899 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2902 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2903 we might be able to further simplify the AND with X and potentially
2904 remove the truncation altogether. */
2905 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2907 rtx x
= XEXP (op0
, 0);
2908 machine_mode xmode
= GET_MODE (x
);
2909 tem
= simplify_gen_binary (AND
, xmode
, x
,
2910 gen_int_mode (INTVAL (trueop1
), xmode
));
2911 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2914 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2915 if (GET_CODE (op0
) == IOR
2916 && CONST_INT_P (trueop1
)
2917 && CONST_INT_P (XEXP (op0
, 1)))
2919 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2920 return simplify_gen_binary (IOR
, mode
,
2921 simplify_gen_binary (AND
, mode
,
2922 XEXP (op0
, 0), op1
),
2923 gen_int_mode (tmp
, mode
));
2926 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2927 insn (and may simplify more). */
2928 if (GET_CODE (op0
) == XOR
2929 && rtx_equal_p (XEXP (op0
, 0), op1
)
2930 && ! side_effects_p (op1
))
2931 return simplify_gen_binary (AND
, mode
,
2932 simplify_gen_unary (NOT
, mode
,
2933 XEXP (op0
, 1), mode
),
2936 if (GET_CODE (op0
) == XOR
2937 && rtx_equal_p (XEXP (op0
, 1), op1
)
2938 && ! side_effects_p (op1
))
2939 return simplify_gen_binary (AND
, mode
,
2940 simplify_gen_unary (NOT
, mode
,
2941 XEXP (op0
, 0), mode
),
2944 /* Similarly for (~(A ^ B)) & A. */
2945 if (GET_CODE (op0
) == NOT
2946 && GET_CODE (XEXP (op0
, 0)) == XOR
2947 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2948 && ! side_effects_p (op1
))
2949 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2951 if (GET_CODE (op0
) == NOT
2952 && GET_CODE (XEXP (op0
, 0)) == XOR
2953 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2954 && ! side_effects_p (op1
))
2955 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2957 /* Convert (A | B) & A to A. */
2958 if (GET_CODE (op0
) == IOR
2959 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2960 || rtx_equal_p (XEXP (op0
, 1), op1
))
2961 && ! side_effects_p (XEXP (op0
, 0))
2962 && ! side_effects_p (XEXP (op0
, 1)))
2965 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2966 ((A & N) + B) & M -> (A + B) & M
2967 Similarly if (N & M) == 0,
2968 ((A | N) + B) & M -> (A + B) & M
2969 and for - instead of + and/or ^ instead of |.
2970 Also, if (N & M) == 0, then
2971 (A +- N) & M -> A & M. */
2972 if (CONST_INT_P (trueop1
)
2973 && HWI_COMPUTABLE_MODE_P (mode
)
2974 && ~UINTVAL (trueop1
)
2975 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
2976 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2981 pmop
[0] = XEXP (op0
, 0);
2982 pmop
[1] = XEXP (op0
, 1);
2984 if (CONST_INT_P (pmop
[1])
2985 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
2986 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
2988 for (which
= 0; which
< 2; which
++)
2991 switch (GET_CODE (tem
))
2994 if (CONST_INT_P (XEXP (tem
, 1))
2995 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
2996 == UINTVAL (trueop1
))
2997 pmop
[which
] = XEXP (tem
, 0);
3001 if (CONST_INT_P (XEXP (tem
, 1))
3002 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3003 pmop
[which
] = XEXP (tem
, 0);
3010 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3012 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3014 return simplify_gen_binary (code
, mode
, tem
, op1
);
3018 /* (and X (ior (not X) Y) -> (and X Y) */
3019 if (GET_CODE (op1
) == IOR
3020 && GET_CODE (XEXP (op1
, 0)) == NOT
3021 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3022 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3024 /* (and (ior (not X) Y) X) -> (and X Y) */
3025 if (GET_CODE (op0
) == IOR
3026 && GET_CODE (XEXP (op0
, 0)) == NOT
3027 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3028 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3030 /* (and X (ior Y (not X)) -> (and X Y) */
3031 if (GET_CODE (op1
) == IOR
3032 && GET_CODE (XEXP (op1
, 1)) == NOT
3033 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3034 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3036 /* (and (ior Y (not X)) X) -> (and X Y) */
3037 if (GET_CODE (op0
) == IOR
3038 && GET_CODE (XEXP (op0
, 1)) == NOT
3039 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3040 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3042 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3046 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3052 /* 0/x is 0 (or x&0 if x has side-effects). */
3053 if (trueop0
== CONST0_RTX (mode
))
3055 if (side_effects_p (op1
))
3056 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3060 if (trueop1
== CONST1_RTX (mode
))
3062 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3066 /* Convert divide by power of two into shift. */
3067 if (CONST_INT_P (trueop1
)
3068 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3069 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
3073 /* Handle floating point and integers separately. */
3074 if (SCALAR_FLOAT_MODE_P (mode
))
3076 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3077 safe for modes with NaNs, since 0.0 / 0.0 will then be
3078 NaN rather than 0.0. Nor is it safe for modes with signed
3079 zeros, since dividing 0 by a negative number gives -0.0 */
3080 if (trueop0
== CONST0_RTX (mode
)
3081 && !HONOR_NANS (mode
)
3082 && !HONOR_SIGNED_ZEROS (mode
)
3083 && ! side_effects_p (op1
))
3086 if (trueop1
== CONST1_RTX (mode
)
3087 && !HONOR_SNANS (mode
))
3090 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3091 && trueop1
!= CONST0_RTX (mode
))
3093 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3096 if (real_equal (d1
, &dconstm1
)
3097 && !HONOR_SNANS (mode
))
3098 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3100 /* Change FP division by a constant into multiplication.
3101 Only do this with -freciprocal-math. */
3102 if (flag_reciprocal_math
3103 && !real_equal (d1
, &dconst0
))
3106 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3107 tem
= const_double_from_real_value (d
, mode
);
3108 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3112 else if (SCALAR_INT_MODE_P (mode
))
3114 /* 0/x is 0 (or x&0 if x has side-effects). */
3115 if (trueop0
== CONST0_RTX (mode
)
3116 && !cfun
->can_throw_non_call_exceptions
)
3118 if (side_effects_p (op1
))
3119 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3123 if (trueop1
== CONST1_RTX (mode
))
3125 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3130 if (trueop1
== constm1_rtx
)
3132 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3134 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3140 /* 0%x is 0 (or x&0 if x has side-effects). */
3141 if (trueop0
== CONST0_RTX (mode
))
3143 if (side_effects_p (op1
))
3144 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3147 /* x%1 is 0 (of x&0 if x has side-effects). */
3148 if (trueop1
== CONST1_RTX (mode
))
3150 if (side_effects_p (op0
))
3151 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3152 return CONST0_RTX (mode
);
3154 /* Implement modulus by power of two as AND. */
3155 if (CONST_INT_P (trueop1
)
3156 && exact_log2 (UINTVAL (trueop1
)) > 0)
3157 return simplify_gen_binary (AND
, mode
, op0
,
3158 gen_int_mode (INTVAL (op1
) - 1, mode
));
3162 /* 0%x is 0 (or x&0 if x has side-effects). */
3163 if (trueop0
== CONST0_RTX (mode
))
3165 if (side_effects_p (op1
))
3166 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3169 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3170 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3172 if (side_effects_p (op0
))
3173 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3174 return CONST0_RTX (mode
);
3180 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3181 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3182 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3184 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3185 if (CONST_INT_P (trueop1
)
3186 && IN_RANGE (INTVAL (trueop1
),
3187 GET_MODE_PRECISION (mode
) / 2 + (code
== ROTATE
),
3188 GET_MODE_PRECISION (mode
) - 1))
3189 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3190 mode
, op0
, GEN_INT (GET_MODE_PRECISION (mode
)
3191 - INTVAL (trueop1
)));
3195 if (trueop1
== CONST0_RTX (mode
))
3197 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3199 /* Rotating ~0 always results in ~0. */
3200 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3201 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3202 && ! side_effects_p (op1
))
3206 scalar constants c1, c2
3207 size (M2) > size (M1)
3208 c1 == size (M2) - size (M1)
3210 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3214 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3216 if (code
== ASHIFTRT
3217 && !VECTOR_MODE_P (mode
)
3219 && CONST_INT_P (op1
)
3220 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3221 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0
)))
3222 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3223 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0
)))
3224 > GET_MODE_BITSIZE (mode
))
3225 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3226 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0
)))
3227 - GET_MODE_BITSIZE (mode
)))
3228 && subreg_lowpart_p (op0
))
3230 rtx tmp
= GEN_INT (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3232 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op0
));
3233 tmp
= simplify_gen_binary (ASHIFTRT
,
3234 GET_MODE (SUBREG_REG (op0
)),
3235 XEXP (SUBREG_REG (op0
), 0),
3237 return lowpart_subreg (mode
, tmp
, inner_mode
);
3240 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3242 val
= INTVAL (op1
) & (GET_MODE_PRECISION (mode
) - 1);
3243 if (val
!= INTVAL (op1
))
3244 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3251 if (trueop1
== CONST0_RTX (mode
))
3253 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3255 goto canonicalize_shift
;
3258 if (trueop1
== CONST0_RTX (mode
))
3260 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3262 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3263 if (GET_CODE (op0
) == CLZ
3264 && CONST_INT_P (trueop1
)
3265 && STORE_FLAG_VALUE
== 1
3266 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3268 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3269 unsigned HOST_WIDE_INT zero_val
= 0;
3271 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3272 && zero_val
== GET_MODE_PRECISION (imode
)
3273 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3274 return simplify_gen_relational (EQ
, mode
, imode
,
3275 XEXP (op0
, 0), const0_rtx
);
3277 goto canonicalize_shift
;
3280 if (width
<= HOST_BITS_PER_WIDE_INT
3281 && mode_signbit_p (mode
, trueop1
)
3282 && ! side_effects_p (op0
))
3284 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3286 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3292 if (width
<= HOST_BITS_PER_WIDE_INT
3293 && CONST_INT_P (trueop1
)
3294 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3295 && ! side_effects_p (op0
))
3297 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3299 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3305 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3307 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3309 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3315 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3317 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3319 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3332 /* ??? There are simplifications that can be done. */
3336 if (!VECTOR_MODE_P (mode
))
3338 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3339 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3340 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3341 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3342 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3344 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3345 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3348 /* Extract a scalar element from a nested VEC_SELECT expression
3349 (with optional nested VEC_CONCAT expression). Some targets
3350 (i386) extract scalar element from a vector using chain of
3351 nested VEC_SELECT expressions. When input operand is a memory
3352 operand, this operation can be simplified to a simple scalar
3353 load from an offseted memory address. */
3354 if (GET_CODE (trueop0
) == VEC_SELECT
)
3356 rtx op0
= XEXP (trueop0
, 0);
3357 rtx op1
= XEXP (trueop0
, 1);
3359 machine_mode opmode
= GET_MODE (op0
);
3360 int elt_size
= GET_MODE_UNIT_SIZE (opmode
);
3361 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3363 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3369 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3370 gcc_assert (i
< n_elts
);
3372 /* Select element, pointed by nested selector. */
3373 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3375 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3376 if (GET_CODE (op0
) == VEC_CONCAT
)
3378 rtx op00
= XEXP (op0
, 0);
3379 rtx op01
= XEXP (op0
, 1);
3381 machine_mode mode00
, mode01
;
3382 int n_elts00
, n_elts01
;
3384 mode00
= GET_MODE (op00
);
3385 mode01
= GET_MODE (op01
);
3387 /* Find out number of elements of each operand. */
3388 if (VECTOR_MODE_P (mode00
))
3390 elt_size
= GET_MODE_UNIT_SIZE (mode00
);
3391 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3396 if (VECTOR_MODE_P (mode01
))
3398 elt_size
= GET_MODE_UNIT_SIZE (mode01
);
3399 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3404 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3406 /* Select correct operand of VEC_CONCAT
3407 and adjust selector. */
3408 if (elem
< n_elts01
)
3419 vec
= rtvec_alloc (1);
3420 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3422 tmp
= gen_rtx_fmt_ee (code
, mode
,
3423 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3426 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3427 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3428 return XEXP (trueop0
, 0);
3432 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3433 gcc_assert (GET_MODE_INNER (mode
)
3434 == GET_MODE_INNER (GET_MODE (trueop0
)));
3435 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3437 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3439 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
3440 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3441 rtvec v
= rtvec_alloc (n_elts
);
3444 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3445 for (i
= 0; i
< n_elts
; i
++)
3447 rtx x
= XVECEXP (trueop1
, 0, i
);
3449 gcc_assert (CONST_INT_P (x
));
3450 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3454 return gen_rtx_CONST_VECTOR (mode
, v
);
3457 /* Recognize the identity. */
3458 if (GET_MODE (trueop0
) == mode
)
3460 bool maybe_ident
= true;
3461 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3463 rtx j
= XVECEXP (trueop1
, 0, i
);
3464 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3466 maybe_ident
= false;
3474 /* If we build {a,b} then permute it, build the result directly. */
3475 if (XVECLEN (trueop1
, 0) == 2
3476 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3477 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3478 && GET_CODE (trueop0
) == VEC_CONCAT
3479 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3480 && GET_MODE (XEXP (trueop0
, 0)) == mode
3481 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3482 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3484 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3485 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3488 gcc_assert (i0
< 4 && i1
< 4);
3489 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3490 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3492 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3495 if (XVECLEN (trueop1
, 0) == 2
3496 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3497 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3498 && GET_CODE (trueop0
) == VEC_CONCAT
3499 && GET_MODE (trueop0
) == mode
)
3501 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3502 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3505 gcc_assert (i0
< 2 && i1
< 2);
3506 subop0
= XEXP (trueop0
, i0
);
3507 subop1
= XEXP (trueop0
, i1
);
3509 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3512 /* If we select one half of a vec_concat, return that. */
3513 if (GET_CODE (trueop0
) == VEC_CONCAT
3514 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3516 rtx subop0
= XEXP (trueop0
, 0);
3517 rtx subop1
= XEXP (trueop0
, 1);
3518 machine_mode mode0
= GET_MODE (subop0
);
3519 machine_mode mode1
= GET_MODE (subop1
);
3520 int li
= GET_MODE_UNIT_SIZE (mode0
);
3521 int l0
= GET_MODE_SIZE (mode0
) / li
;
3522 int l1
= GET_MODE_SIZE (mode1
) / li
;
3523 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3524 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3526 bool success
= true;
3527 for (int i
= 1; i
< l0
; ++i
)
3529 rtx j
= XVECEXP (trueop1
, 0, i
);
3530 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3539 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3541 bool success
= true;
3542 for (int i
= 1; i
< l1
; ++i
)
3544 rtx j
= XVECEXP (trueop1
, 0, i
);
3545 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3557 if (XVECLEN (trueop1
, 0) == 1
3558 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3559 && GET_CODE (trueop0
) == VEC_CONCAT
)
3562 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3564 /* Try to find the element in the VEC_CONCAT. */
3565 while (GET_MODE (vec
) != mode
3566 && GET_CODE (vec
) == VEC_CONCAT
)
3568 HOST_WIDE_INT vec_size
;
3570 if (CONST_INT_P (XEXP (vec
, 0)))
3572 /* vec_concat of two const_ints doesn't make sense with
3573 respect to modes. */
3574 if (CONST_INT_P (XEXP (vec
, 1)))
3577 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3578 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3581 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3583 if (offset
< vec_size
)
3584 vec
= XEXP (vec
, 0);
3588 vec
= XEXP (vec
, 1);
3590 vec
= avoid_constant_pool_reference (vec
);
3593 if (GET_MODE (vec
) == mode
)
3597 /* If we select elements in a vec_merge that all come from the same
3598 operand, select from that operand directly. */
3599 if (GET_CODE (op0
) == VEC_MERGE
)
3601 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3602 if (CONST_INT_P (trueop02
))
3604 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3605 bool all_operand0
= true;
3606 bool all_operand1
= true;
3607 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3609 rtx j
= XVECEXP (trueop1
, 0, i
);
3610 if (sel
& (1 << UINTVAL (j
)))
3611 all_operand1
= false;
3613 all_operand0
= false;
3615 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3616 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3617 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3618 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3622 /* If we have two nested selects that are inverses of each
3623 other, replace them with the source operand. */
3624 if (GET_CODE (trueop0
) == VEC_SELECT
3625 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3627 rtx op0_subop1
= XEXP (trueop0
, 1);
3628 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3629 gcc_assert (XVECLEN (trueop1
, 0) == GET_MODE_NUNITS (mode
));
3631 /* Apply the outer ordering vector to the inner one. (The inner
3632 ordering vector is expressly permitted to be of a different
3633 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3634 then the two VEC_SELECTs cancel. */
3635 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3637 rtx x
= XVECEXP (trueop1
, 0, i
);
3638 if (!CONST_INT_P (x
))
3640 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3641 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3644 return XEXP (trueop0
, 0);
3650 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3651 ? GET_MODE (trueop0
)
3652 : GET_MODE_INNER (mode
));
3653 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3654 ? GET_MODE (trueop1
)
3655 : GET_MODE_INNER (mode
));
3657 gcc_assert (VECTOR_MODE_P (mode
));
3658 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3659 == GET_MODE_SIZE (mode
));
3661 if (VECTOR_MODE_P (op0_mode
))
3662 gcc_assert (GET_MODE_INNER (mode
)
3663 == GET_MODE_INNER (op0_mode
));
3665 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3667 if (VECTOR_MODE_P (op1_mode
))
3668 gcc_assert (GET_MODE_INNER (mode
)
3669 == GET_MODE_INNER (op1_mode
));
3671 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3673 if ((GET_CODE (trueop0
) == CONST_VECTOR
3674 || CONST_SCALAR_INT_P (trueop0
)
3675 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3676 && (GET_CODE (trueop1
) == CONST_VECTOR
3677 || CONST_SCALAR_INT_P (trueop1
)
3678 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3680 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
3681 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3682 rtvec v
= rtvec_alloc (n_elts
);
3684 unsigned in_n_elts
= 1;
3686 if (VECTOR_MODE_P (op0_mode
))
3687 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3688 for (i
= 0; i
< n_elts
; i
++)
3692 if (!VECTOR_MODE_P (op0_mode
))
3693 RTVEC_ELT (v
, i
) = trueop0
;
3695 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3699 if (!VECTOR_MODE_P (op1_mode
))
3700 RTVEC_ELT (v
, i
) = trueop1
;
3702 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3707 return gen_rtx_CONST_VECTOR (mode
, v
);
3710 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3711 Restrict the transformation to avoid generating a VEC_SELECT with a
3712 mode unrelated to its operand. */
3713 if (GET_CODE (trueop0
) == VEC_SELECT
3714 && GET_CODE (trueop1
) == VEC_SELECT
3715 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3716 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3718 rtx par0
= XEXP (trueop0
, 1);
3719 rtx par1
= XEXP (trueop1
, 1);
3720 int len0
= XVECLEN (par0
, 0);
3721 int len1
= XVECLEN (par1
, 0);
3722 rtvec vec
= rtvec_alloc (len0
+ len1
);
3723 for (int i
= 0; i
< len0
; i
++)
3724 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3725 for (int i
= 0; i
< len1
; i
++)
3726 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3727 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3728 gen_rtx_PARALLEL (VOIDmode
, vec
));
3741 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
3744 unsigned int width
= GET_MODE_PRECISION (mode
);
3746 if (VECTOR_MODE_P (mode
)
3747 && code
!= VEC_CONCAT
3748 && GET_CODE (op0
) == CONST_VECTOR
3749 && GET_CODE (op1
) == CONST_VECTOR
)
3751 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3752 machine_mode op0mode
= GET_MODE (op0
);
3753 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3754 machine_mode op1mode
= GET_MODE (op1
);
3755 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3756 rtvec v
= rtvec_alloc (n_elts
);
3759 gcc_assert (op0_n_elts
== n_elts
);
3760 gcc_assert (op1_n_elts
== n_elts
);
3761 for (i
= 0; i
< n_elts
; i
++)
3763 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3764 CONST_VECTOR_ELT (op0
, i
),
3765 CONST_VECTOR_ELT (op1
, i
));
3768 RTVEC_ELT (v
, i
) = x
;
3771 return gen_rtx_CONST_VECTOR (mode
, v
);
3774 if (VECTOR_MODE_P (mode
)
3775 && code
== VEC_CONCAT
3776 && (CONST_SCALAR_INT_P (op0
)
3777 || GET_CODE (op0
) == CONST_FIXED
3778 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3779 && (CONST_SCALAR_INT_P (op1
)
3780 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3781 || GET_CODE (op1
) == CONST_FIXED
))
3783 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3784 rtvec v
= rtvec_alloc (n_elts
);
3786 gcc_assert (n_elts
>= 2);
3789 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3790 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3792 RTVEC_ELT (v
, 0) = op0
;
3793 RTVEC_ELT (v
, 1) = op1
;
3797 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3798 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3801 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3802 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3803 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3805 for (i
= 0; i
< op0_n_elts
; ++i
)
3806 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3807 for (i
= 0; i
< op1_n_elts
; ++i
)
3808 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3811 return gen_rtx_CONST_VECTOR (mode
, v
);
3814 if (SCALAR_FLOAT_MODE_P (mode
)
3815 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3816 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3817 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3828 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3830 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3832 for (i
= 0; i
< 4; i
++)
3849 real_from_target (&r
, tmp0
, mode
);
3850 return const_double_from_real_value (r
, mode
);
3854 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3857 real_convert (&f0
, mode
, CONST_DOUBLE_REAL_VALUE (op0
));
3858 real_convert (&f1
, mode
, CONST_DOUBLE_REAL_VALUE (op1
));
3860 if (HONOR_SNANS (mode
)
3861 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3865 && real_equal (&f1
, &dconst0
)
3866 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3869 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3870 && flag_trapping_math
3871 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3873 int s0
= REAL_VALUE_NEGATIVE (f0
);
3874 int s1
= REAL_VALUE_NEGATIVE (f1
);
3879 /* Inf + -Inf = NaN plus exception. */
3884 /* Inf - Inf = NaN plus exception. */
3889 /* Inf / Inf = NaN plus exception. */
3896 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3897 && flag_trapping_math
3898 && ((REAL_VALUE_ISINF (f0
) && real_equal (&f1
, &dconst0
))
3899 || (REAL_VALUE_ISINF (f1
)
3900 && real_equal (&f0
, &dconst0
))))
3901 /* Inf * 0 = NaN plus exception. */
3904 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3906 real_convert (&result
, mode
, &value
);
3908 /* Don't constant fold this floating point operation if
3909 the result has overflowed and flag_trapping_math. */
3911 if (flag_trapping_math
3912 && MODE_HAS_INFINITIES (mode
)
3913 && REAL_VALUE_ISINF (result
)
3914 && !REAL_VALUE_ISINF (f0
)
3915 && !REAL_VALUE_ISINF (f1
))
3916 /* Overflow plus exception. */
3919 /* Don't constant fold this floating point operation if the
3920 result may dependent upon the run-time rounding mode and
3921 flag_rounding_math is set, or if GCC's software emulation
3922 is unable to accurately represent the result. */
3924 if ((flag_rounding_math
3925 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3926 && (inexact
|| !real_identical (&result
, &value
)))
3929 return const_double_from_real_value (result
, mode
);
3933 /* We can fold some multi-word operations. */
3934 if ((GET_MODE_CLASS (mode
) == MODE_INT
3935 || GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
3936 && CONST_SCALAR_INT_P (op0
)
3937 && CONST_SCALAR_INT_P (op1
))
3941 rtx_mode_t pop0
= std::make_pair (op0
, mode
);
3942 rtx_mode_t pop1
= std::make_pair (op1
, mode
);
3944 #if TARGET_SUPPORTS_WIDE_INT == 0
3945 /* This assert keeps the simplification from producing a result
3946 that cannot be represented in a CONST_DOUBLE but a lot of
3947 upstream callers expect that this function never fails to
3948 simplify something and so you if you added this to the test
3949 above the code would die later anyway. If this assert
3950 happens, you just need to make the port support wide int. */
3951 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
3956 result
= wi::sub (pop0
, pop1
);
3960 result
= wi::add (pop0
, pop1
);
3964 result
= wi::mul (pop0
, pop1
);
3968 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3974 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3980 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3986 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3992 result
= wi::bit_and (pop0
, pop1
);
3996 result
= wi::bit_or (pop0
, pop1
);
4000 result
= wi::bit_xor (pop0
, pop1
);
4004 result
= wi::smin (pop0
, pop1
);
4008 result
= wi::smax (pop0
, pop1
);
4012 result
= wi::umin (pop0
, pop1
);
4016 result
= wi::umax (pop0
, pop1
);
4023 wide_int wop1
= pop1
;
4024 if (SHIFT_COUNT_TRUNCATED
)
4025 wop1
= wi::umod_trunc (wop1
, width
);
4026 else if (wi::geu_p (wop1
, width
))
4032 result
= wi::lrshift (pop0
, wop1
);
4036 result
= wi::arshift (pop0
, wop1
);
4040 result
= wi::lshift (pop0
, wop1
);
4051 if (wi::neg_p (pop1
))
4057 result
= wi::lrotate (pop0
, pop1
);
4061 result
= wi::rrotate (pop0
, pop1
);
4072 return immed_wide_int_const (result
, mode
);
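/* Added illustrative note (not in the original source): the wide-int folding
   above covers the whole integer binary-operation table.  For example
   (plus:SI (const_int 7) (const_int -10)) is computed by wi::add and comes
   back from immed_wide_int_const as (const_int -3), and a constant shift
   such as (ashift:SI (const_int 1) (const_int 33)) first has its count
   reduced to 33 % 32 = 1 when SHIFT_COUNT_TRUNCATED is set.  */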
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return 0;
}
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */
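/* Added illustrative note (not in the original source): for example, given
   op0 = (plus (reg A) (const_int 4)) and op1 = (minus (reg B) (reg A)),
   the expansion loop below flattens everything into the ops[] array as
   { A, +4, B, -A }, the pairwise simplification loop cancels A against -A,
   and the rebuild step returns (plus (reg B) (const_int 4)).  The register
   names A and B are placeholders.  */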
4110 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4113 struct simplify_plus_minus_op_data
4120 int changed
, n_constants
, canonicalized
= 0;
4123 memset (ops
, 0, sizeof ops
);
4125 /* Set up the two operands and then expand them until nothing has been
4126 changed. If we run out of room in our array, give up; this should
4127 almost never happen. */
4132 ops
[1].neg
= (code
== MINUS
);
4139 for (i
= 0; i
< n_ops
; i
++)
4141 rtx this_op
= ops
[i
].op
;
4142 int this_neg
= ops
[i
].neg
;
4143 enum rtx_code this_code
= GET_CODE (this_op
);
4149 if (n_ops
== ARRAY_SIZE (ops
))
4152 ops
[n_ops
].op
= XEXP (this_op
, 1);
4153 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4156 ops
[i
].op
= XEXP (this_op
, 0);
4158 /* If this operand was negated then we will potentially
4159 canonicalize the expression. Similarly if we don't
4160 place the operands adjacent we're re-ordering the
4161 expression and thus might be performing a
4162 canonicalization. Ignore register re-ordering.
4163 ??? It might be better to shuffle the ops array here,
4164 but then (plus (plus (A, B), plus (C, D))) wouldn't
4165 be seen as non-canonical. */
4168 && !(REG_P (ops
[i
].op
) && REG_P (ops
[n_ops
- 1].op
))))
4173 ops
[i
].op
= XEXP (this_op
, 0);
4174 ops
[i
].neg
= ! this_neg
;
4180 if (n_ops
!= ARRAY_SIZE (ops
)
4181 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4182 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4183 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4185 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4186 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4187 ops
[n_ops
].neg
= this_neg
;
4195 /* ~a -> (-a - 1) */
4196 if (n_ops
!= ARRAY_SIZE (ops
))
4198 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4199 ops
[n_ops
++].neg
= this_neg
;
4200 ops
[i
].op
= XEXP (this_op
, 0);
4201 ops
[i
].neg
= !this_neg
;
4211 ops
[i
].op
= neg_const_int (mode
, this_op
);
4225 if (n_constants
> 1)
4228 gcc_assert (n_ops
>= 2);
4230 /* If we only have two operands, we can avoid the loops. */
4233 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4236 /* Get the two operands. Be careful with the order, especially for
4237 the cases where code == MINUS. */
4238 if (ops
[0].neg
&& ops
[1].neg
)
4240 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4243 else if (ops
[0].neg
)
4254 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4257 /* Now simplify each pair of operands until nothing changes. */
4260 /* Insertion sort is good enough for a small array. */
4261 for (i
= 1; i
< n_ops
; i
++)
4263 struct simplify_plus_minus_op_data save
;
4267 cmp
= simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
);
4270 /* Just swapping registers doesn't count as canonicalization. */
4276 ops
[j
+ 1] = ops
[j
];
4278 && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
) > 0);
4283 for (i
= n_ops
- 1; i
> 0; i
--)
4284 for (j
= i
- 1; j
>= 0; j
--)
4286 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4287 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4289 if (lhs
!= 0 && rhs
!= 0)
4291 enum rtx_code ncode
= PLUS
;
4297 std::swap (lhs
, rhs
);
4299 else if (swap_commutative_operands_p (lhs
, rhs
))
4300 std::swap (lhs
, rhs
);
4302 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4303 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4305 rtx tem_lhs
, tem_rhs
;
4307 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4308 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4309 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
,
4312 if (tem
&& !CONSTANT_P (tem
))
4313 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4316 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4320 /* Reject "simplifications" that just wrap the two
4321 arguments in a CONST. Failure to do so can result
4322 in infinite recursion with simplify_binary_operation
4323 when it calls us to simplify CONST operations.
4324 Also, if we find such a simplification, don't try
4325 any more combinations with this rhs: We must have
4326 something like symbol+offset, ie. one of the
4327 trivial CONST expressions we handle later. */
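	      /* Added illustrative note (not in the original source): the
		 rejected case is e.g. lhs = (symbol_ref "x") and
		 rhs = (const_int 8), where the only "simplification" found is
		 (const (plus (symbol_ref "x") (const_int 8))); accepting that
		 here would feed the same CONST straight back into this
		 routine.  */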
4328 if (GET_CODE (tem
) == CONST
4329 && GET_CODE (XEXP (tem
, 0)) == ncode
4330 && XEXP (XEXP (tem
, 0), 0) == lhs
4331 && XEXP (XEXP (tem
, 0), 1) == rhs
)
4334 if (GET_CODE (tem
) == NEG
)
4335 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4336 if (CONST_INT_P (tem
) && lneg
)
4337 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4341 ops
[j
].op
= NULL_RTX
;
4351 /* Pack all the operands to the lower-numbered entries. */
4352 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4361 /* If nothing changed, fail. */
4365 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4367 && CONST_INT_P (ops
[1].op
)
4368 && CONSTANT_P (ops
[0].op
)
4370 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4372 /* We suppressed creation of trivial CONST expressions in the
4373 combination loop to avoid recursion. Create one manually now.
4374 The combination loop should have ensured that there is exactly
4375 one CONST_INT, and the sort will have ensured that it is last
4376 in the array and that any other constant will be next-to-last. */
4379 && CONST_INT_P (ops
[n_ops
- 1].op
)
4380 && CONSTANT_P (ops
[n_ops
- 2].op
))
4382 rtx value
= ops
[n_ops
- 1].op
;
4383 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4384 value
= neg_const_int (mode
, value
);
4385 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4390 /* Put a non-negated operand first, if possible. */
4392 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4395 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4404 /* Now make the result by performing the requested operations. */
4406 for (i
= 1; i
< n_ops
; i
++)
4407 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4408 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode as well.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
4434 simplify_relational_operation (enum rtx_code code
, machine_mode mode
,
4435 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4437 rtx tem
, trueop0
, trueop1
;
4439 if (cmp_mode
== VOIDmode
)
4440 cmp_mode
= GET_MODE (op0
);
4441 if (cmp_mode
== VOIDmode
)
4442 cmp_mode
= GET_MODE (op1
);
4444 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4447 if (SCALAR_FLOAT_MODE_P (mode
))
4449 if (tem
== const0_rtx
)
4450 return CONST0_RTX (mode
);
4451 #ifdef FLOAT_STORE_FLAG_VALUE
4453 REAL_VALUE_TYPE val
;
4454 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4455 return const_double_from_real_value (val
, mode
);
4461 if (VECTOR_MODE_P (mode
))
4463 if (tem
== const0_rtx
)
4464 return CONST0_RTX (mode
);
4465 #ifdef VECTOR_STORE_FLAG_VALUE
4470 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4471 if (val
== NULL_RTX
)
4473 if (val
== const1_rtx
)
4474 return CONST1_RTX (mode
);
4476 units
= GET_MODE_NUNITS (mode
);
4477 v
= rtvec_alloc (units
);
4478 for (i
= 0; i
< units
; i
++)
4479 RTVEC_ELT (v
, i
) = val
;
4480 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
4490 /* For the following tests, ensure const0_rtx is op1. */
4491 if (swap_commutative_operands_p (op0
, op1
)
4492 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4493 std::swap (op0
, op1
), code
= swap_condition (code
);
4495 /* If op0 is a compare, extract the comparison arguments from it. */
4496 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4497 return simplify_gen_relational (code
, mode
, VOIDmode
,
4498 XEXP (op0
, 0), XEXP (op0
, 1));
4500 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4504 trueop0
= avoid_constant_pool_reference (op0
);
4505 trueop1
= avoid_constant_pool_reference (op1
);
4506 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
4517 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
4518 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4520 enum rtx_code op0code
= GET_CODE (op0
);
4522 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
4528 if (GET_MODE (op0
) == mode
)
4529 return simplify_rtx (op0
);
4531 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4532 XEXP (op0
, 0), XEXP (op0
, 1));
4534 else if (code
== EQ
)
4536 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
4537 if (new_code
!= UNKNOWN
)
4538 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4539 XEXP (op0
, 0), XEXP (op0
, 1));
4543 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4544 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
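  /* Added illustrative note (not in the original source): this is the usual
     unsigned overflow-check idiom.  E.g.
     (ltu:SI (plus:SI a (const_int 4)) (const_int 4)) becomes
     (geu:SI a (const_int -4)), i.e. "a + 4 wrapped around" is rewritten as
     "a >= 0xfffffffc" without materializing the addition.  */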
4545 if ((code
== LTU
|| code
== GEU
)
4546 && GET_CODE (op0
) == PLUS
4547 && CONST_INT_P (XEXP (op0
, 1))
4548 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4549 || rtx_equal_p (op1
, XEXP (op0
, 1)))
4550 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4551 && XEXP (op0
, 1) != const0_rtx
)
4554 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4555 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4556 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4559 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4560 if ((code
== LTU
|| code
== GEU
)
4561 && GET_CODE (op0
) == PLUS
4562 && rtx_equal_p (op1
, XEXP (op0
, 1))
4563 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4564 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4565 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4566 copy_rtx (XEXP (op0
, 0)));
4568 if (op1
== const0_rtx
)
4570 /* Canonicalize (GTU x 0) as (NE x 0). */
4572 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4573 /* Canonicalize (LEU x 0) as (EQ x 0). */
4575 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4577 else if (op1
== const1_rtx
)
4582 /* Canonicalize (GE x 1) as (GT x 0). */
4583 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4586 /* Canonicalize (GEU x 1) as (NE x 0). */
4587 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4590 /* Canonicalize (LT x 1) as (LE x 0). */
4591 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4594 /* Canonicalize (LTU x 1) as (EQ x 0). */
4595 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4601 else if (op1
== constm1_rtx
)
4603 /* Canonicalize (LE x -1) as (LT x 0). */
4605 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4606 /* Canonicalize (GT x -1) as (GE x 0). */
4608 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4611 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
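  /* Added illustrative note (not in the original source): e.g.
     (eq:SI (plus:SI x (const_int 3)) (const_int 10)) becomes
     (eq:SI x (const_int 7)); for MINUS the inverse operation PLUS is used
     to recompute the constant.  */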
4612 if ((code
== EQ
|| code
== NE
)
4613 && (op0code
== PLUS
|| op0code
== MINUS
)
4615 && CONSTANT_P (XEXP (op0
, 1))
4616 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4618 rtx x
= XEXP (op0
, 0);
4619 rtx c
= XEXP (op0
, 1);
4620 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4621 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
4623 /* Detect an infinite recursive condition, where we oscillate at this
4624 simplification case between:
4625 A + B == C <---> C - B == A,
4626 where A, B, and C are all constants with non-simplifiable expressions,
4627 usually SYMBOL_REFs. */
4628 if (GET_CODE (tem
) == invcode
4630 && rtx_equal_p (c
, XEXP (tem
, 1)))
4633 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
4639 && op1
== const0_rtx
4640 && GET_MODE_CLASS (mode
) == MODE_INT
4641 && cmp_mode
!= VOIDmode
4642 /* ??? Work-around BImode bugs in the ia64 backend. */
4644 && cmp_mode
!= BImode
4645 && nonzero_bits (op0
, cmp_mode
) == 1
4646 && STORE_FLAG_VALUE
== 1)
4647 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
4648 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
4649 : lowpart_subreg (mode
, op0
, cmp_mode
);
4651 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4652 if ((code
== EQ
|| code
== NE
)
4653 && op1
== const0_rtx
4655 return simplify_gen_relational (code
, mode
, cmp_mode
,
4656 XEXP (op0
, 0), XEXP (op0
, 1));
4658 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4659 if ((code
== EQ
|| code
== NE
)
4661 && rtx_equal_p (XEXP (op0
, 0), op1
)
4662 && !side_effects_p (XEXP (op0
, 0)))
4663 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
4666 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4667 if ((code
== EQ
|| code
== NE
)
4669 && rtx_equal_p (XEXP (op0
, 1), op1
)
4670 && !side_effects_p (XEXP (op0
, 1)))
4671 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4674 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4675 if ((code
== EQ
|| code
== NE
)
4677 && CONST_SCALAR_INT_P (op1
)
4678 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
4679 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4680 simplify_gen_binary (XOR
, cmp_mode
,
4681 XEXP (op0
, 1), op1
));
4683 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4684 can be implemented with a BICS instruction on some targets, or
4685 constant-folded if y is a constant. */
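  /* Added illustrative note (not in the original source): with y constant,
     e.g. (eq (and x (const_int 0xff)) x) becomes
     (eq (and (const_int -256) x) (const_int 0)), a direct test that the
     bits of x outside the low byte are clear.  */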
4686 if ((code
== EQ
|| code
== NE
)
4688 && rtx_equal_p (XEXP (op0
, 0), op1
)
4689 && !side_effects_p (op1
)
4690 && op1
!= CONST0_RTX (cmp_mode
))
4692 rtx not_y
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4693 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_y
, XEXP (op0
, 0));
4695 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
4696 CONST0_RTX (cmp_mode
));
4699 /* Likewise for (eq/ne (and x y) y). */
4700 if ((code
== EQ
|| code
== NE
)
4702 && rtx_equal_p (XEXP (op0
, 1), op1
)
4703 && !side_effects_p (op1
)
4704 && op1
!= CONST0_RTX (cmp_mode
))
4706 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0), cmp_mode
);
4707 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
4709 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
4710 CONST0_RTX (cmp_mode
));
4713 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
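  /* Added illustrative note (not in the original source): in SImode,
     (eq (bswap:SI x) (const_int 0x12345678)) becomes
     (eq x (const_int 0x78563412)); byte-swapping the constant once at
     compile time is cheaper than byte-swapping x at run time.  */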
4714 if ((code
== EQ
|| code
== NE
)
4715 && GET_CODE (op0
) == BSWAP
4716 && CONST_SCALAR_INT_P (op1
))
4717 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4718 simplify_gen_unary (BSWAP
, cmp_mode
,
4721 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4722 if ((code
== EQ
|| code
== NE
)
4723 && GET_CODE (op0
) == BSWAP
4724 && GET_CODE (op1
) == BSWAP
)
4725 return simplify_gen_relational (code
, mode
, cmp_mode
,
4726 XEXP (op0
, 0), XEXP (op1
, 0));
4728 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
4734 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4735 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
4736 XEXP (op0
, 0), const0_rtx
);
4741 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4742 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
4743 XEXP (op0
, 0), const0_rtx
);
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */
4820 simplify_const_relational_operation (enum rtx_code code
,
4828 gcc_assert (mode
!= VOIDmode
4829 || (GET_MODE (op0
) == VOIDmode
4830 && GET_MODE (op1
) == VOIDmode
));
4832 /* If op0 is a compare, extract the comparison arguments from it. */
4833 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4835 op1
= XEXP (op0
, 1);
4836 op0
= XEXP (op0
, 0);
4838 if (GET_MODE (op0
) != VOIDmode
)
4839 mode
= GET_MODE (op0
);
4840 else if (GET_MODE (op1
) != VOIDmode
)
4841 mode
= GET_MODE (op1
);
4846 /* We can't simplify MODE_CC values since we don't know what the
4847 actual comparison is. */
4848 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4851 /* Make sure the constant is second. */
4852 if (swap_commutative_operands_p (op0
, op1
))
4854 std::swap (op0
, op1
);
4855 code
= swap_condition (code
);
4858 trueop0
= avoid_constant_pool_reference (op0
);
4859 trueop1
= avoid_constant_pool_reference (op1
);
4861 /* For integer comparisons of A and B maybe we can simplify A - B and can
4862 then simplify a comparison of that with zero. If A and B are both either
4863 a register or a CONST_INT, this can't help; testing for these cases will
4864 prevent infinite recursion here and speed things up.
4866 We can only do this for EQ and NE comparisons as otherwise we may
4867 lose or introduce overflow which we cannot disregard as undefined as
4868 we do not know the signedness of the operation on either the left or
4869 the right hand side of the comparison. */
4871 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4872 && (code
== EQ
|| code
== NE
)
4873 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
4874 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
4875 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4876 /* We cannot do this if tem is a nonzero address. */
4877 && ! nonzero_address_p (tem
))
4878 return simplify_const_relational_operation (signed_condition (code
),
4879 mode
, tem
, const0_rtx
);
4881 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4882 return const_true_rtx
;
4884 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4887 /* For modes without NaNs, if the two operands are equal, we know the
4888 result except if they have side-effects. Even with NaNs we know
4889 the result of unordered comparisons and, if signaling NaNs are
4890 irrelevant, also the result of LT/GT/LTGT. */
4891 if ((! HONOR_NANS (trueop0
)
4892 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4893 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4894 && ! HONOR_SNANS (trueop0
)))
4895 && rtx_equal_p (trueop0
, trueop1
)
4896 && ! side_effects_p (trueop0
))
4897 return comparison_result (code
, CMP_EQ
);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
4901 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
4902 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
4903 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4905 const REAL_VALUE_TYPE
*d0
= CONST_DOUBLE_REAL_VALUE (trueop0
);
4906 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
4908 /* Comparisons are unordered iff at least one of the values is NaN. */
4909 if (REAL_VALUE_ISNAN (*d0
) || REAL_VALUE_ISNAN (*d1
))
4919 return const_true_rtx
;
4932 return comparison_result (code
,
4933 (real_equal (d0
, d1
) ? CMP_EQ
:
4934 real_less (d0
, d1
) ? CMP_LT
: CMP_GT
));
4937 /* Otherwise, see if the operands are both integers. */
4938 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4939 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
4944 machine_mode cmode
= (mode
== VOIDmode
) ? MAX_MODE_INT
: mode
;
4945 rtx_mode_t ptrueop0
= std::make_pair (trueop0
, cmode
);
4946 rtx_mode_t ptrueop1
= std::make_pair (trueop1
, cmode
);
4948 if (wi::eq_p (ptrueop0
, ptrueop1
))
4949 return comparison_result (code
, CMP_EQ
);
4952 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
4953 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
4954 return comparison_result (code
, cr
);
4958 /* Optimize comparisons with upper and lower bounds. */
4959 if (HWI_COMPUTABLE_MODE_P (mode
)
4960 && CONST_INT_P (trueop1
)
4961 && !side_effects_p (trueop0
))
4964 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, mode
);
4965 HOST_WIDE_INT val
= INTVAL (trueop1
);
4966 HOST_WIDE_INT mmin
, mmax
;
4976 /* Get a reduced range if the sign bit is zero. */
4977 if (nonzero
<= (GET_MODE_MASK (mode
) >> 1))
4984 rtx mmin_rtx
, mmax_rtx
;
4985 get_mode_bounds (mode
, sign
, mode
, &mmin_rtx
, &mmax_rtx
);
4987 mmin
= INTVAL (mmin_rtx
);
4988 mmax
= INTVAL (mmax_rtx
);
4991 unsigned int sign_copies
= num_sign_bit_copies (trueop0
, mode
);
4993 mmin
>>= (sign_copies
- 1);
4994 mmax
>>= (sign_copies
- 1);
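	  /* Added illustrative note (not in the original source): e.g. if
	     trueop0 is SImode but num_sign_bit_copies reports 25 copies of
	     the sign bit, the effective range shrinks from [-2^31, 2^31-1]
	     to [-128, 127], so comparisons against constants outside that
	     window fold to a known result below.  */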
5000 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5002 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5003 return const_true_rtx
;
5004 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5009 return const_true_rtx
;
5014 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5016 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5017 return const_true_rtx
;
5018 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5023 return const_true_rtx
;
5029 /* x == y is always false for y out of range. */
5030 if (val
< mmin
|| val
> mmax
)
5034 /* x > y is always false for y >= mmax, always true for y < mmin. */
5036 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5038 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5039 return const_true_rtx
;
5045 return const_true_rtx
;
5048 /* x < y is always false for y <= mmin, always true for y > mmax. */
5050 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5052 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5053 return const_true_rtx
;
5059 return const_true_rtx
;
5063 /* x != y is always true for y out of range. */
5064 if (val
< mmin
|| val
> mmax
)
5065 return const_true_rtx
;
5073 /* Optimize integer comparisons with zero. */
5074 if (trueop1
== const0_rtx
&& !side_effects_p (trueop0
))
5076 /* Some addresses are known to be nonzero. We don't know
5077 their sign, but equality comparisons are known. */
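      /* Added illustrative note (not in the original source): e.g. comparing
	 (symbol_ref "some_global") against zero: the address is known to be
	 nonzero, so EQ/LEU fold to const0_rtx and NE/GTU fold to
	 const_true_rtx, while the signed orderings are left alone.  */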
5078 if (nonzero_address_p (trueop0
))
5080 if (code
== EQ
|| code
== LEU
)
5082 if (code
== NE
|| code
== GTU
)
5083 return const_true_rtx
;
5086 /* See if the first operand is an IOR with a constant. If so, we
5087 may be able to determine the result of this comparison. */
5088 if (GET_CODE (op0
) == IOR
)
5090 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5091 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5093 int sign_bitnum
= GET_MODE_PRECISION (mode
) - 1;
5094 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5095 && (UINTVAL (inner_const
)
5096 & ((unsigned HOST_WIDE_INT
) 1
5106 return const_true_rtx
;
5110 return const_true_rtx
;
5124 /* Optimize comparison of ABS with zero. */
5125 if (trueop1
== CONST0_RTX (mode
) && !side_effects_p (trueop0
)
5126 && (GET_CODE (trueop0
) == ABS
5127 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5128 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5133 /* Optimize abs(x) < 0.0. */
5134 if (!HONOR_SNANS (mode
)
5135 && (!INTEGRAL_MODE_P (mode
)
5136 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
5138 if (INTEGRAL_MODE_P (mode
)
5139 && (issue_strict_overflow_warning
5140 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
5141 warning (OPT_Wstrict_overflow
,
5142 ("assuming signed overflow does not occur when "
5143 "assuming abs (x) < 0 is false"));
5149 /* Optimize abs(x) >= 0.0. */
5150 if (!HONOR_NANS (mode
)
5151 && (!INTEGRAL_MODE_P (mode
)
5152 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
5154 if (INTEGRAL_MODE_P (mode
)
5155 && (issue_strict_overflow_warning
5156 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
5157 warning (OPT_Wstrict_overflow
,
5158 ("assuming signed overflow does not occur when "
5159 "assuming abs (x) >= 0 is true"));
5160 return const_true_rtx
;
5165 /* Optimize ! (abs(x) < 0.0). */
5166 return const_true_rtx
;
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
5181 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5182 machine_mode op0_mode
, rtx op0
, rtx op1
,
5185 unsigned int width
= GET_MODE_PRECISION (mode
);
5186 bool any_change
= false;
5189 /* VOIDmode means "infinite" precision. */
5191 width
= HOST_BITS_PER_WIDE_INT
;
5196 /* Simplify negations around the multiplication. */
5197 /* -a * -b + c => a * b + c. */
5198 if (GET_CODE (op0
) == NEG
)
5200 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5202 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5204 else if (GET_CODE (op1
) == NEG
)
5206 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5208 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5211 /* Canonicalize the two multiplication operands. */
5212 /* a * -b + c => -b * a + c. */
5213 if (swap_commutative_operands_p (op0
, op1
))
5214 std::swap (op0
, op1
), any_change
= true;
5217 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5222 if (CONST_INT_P (op0
)
5223 && CONST_INT_P (op1
)
5224 && CONST_INT_P (op2
)
5225 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
5226 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
5228 /* Extracting a bit-field from a constant */
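	  /* Added illustrative note (not in the original source): e.g.
	     (zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 4))
	     on a !BITS_BIG_ENDIAN target shifts the value right by the bit
	     position (4), masks it to the field width (4 bits), and yields
	     (const_int 3).  */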
5229 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5230 HOST_WIDE_INT op1val
= INTVAL (op1
);
5231 HOST_WIDE_INT op2val
= INTVAL (op2
);
5232 if (BITS_BIG_ENDIAN
)
5233 val
>>= GET_MODE_PRECISION (op0_mode
) - op2val
- op1val
;
5237 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5239 /* First zero-extend. */
5240 val
&= ((unsigned HOST_WIDE_INT
) 1 << op1val
) - 1;
5241 /* If desired, propagate sign bit. */
5242 if (code
== SIGN_EXTRACT
5243 && (val
& ((unsigned HOST_WIDE_INT
) 1 << (op1val
- 1)))
5245 val
|= ~ (((unsigned HOST_WIDE_INT
) 1 << op1val
) - 1);
5248 return gen_int_mode (val
, mode
);
5253 if (CONST_INT_P (op0
))
5254 return op0
!= const0_rtx
? op1
: op2
;
5256 /* Convert c ? a : a into "a". */
5257 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5260 /* Convert a != b ? a : b into "a". */
5261 if (GET_CODE (op0
) == NE
5262 && ! side_effects_p (op0
)
5263 && ! HONOR_NANS (mode
)
5264 && ! HONOR_SIGNED_ZEROS (mode
)
5265 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5266 && rtx_equal_p (XEXP (op0
, 1), op2
))
5267 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5268 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5271 /* Convert a == b ? a : b into "b". */
5272 if (GET_CODE (op0
) == EQ
5273 && ! side_effects_p (op0
)
5274 && ! HONOR_NANS (mode
)
5275 && ! HONOR_SIGNED_ZEROS (mode
)
5276 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5277 && rtx_equal_p (XEXP (op0
, 1), op2
))
5278 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5279 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5282 /* Convert (!c) != {0,...,0} ? a : b into
5283 c != {0,...,0} ? b : a for vector modes. */
5284 if (VECTOR_MODE_P (GET_MODE (op1
))
5285 && GET_CODE (op0
) == NE
5286 && GET_CODE (XEXP (op0
, 0)) == NOT
5287 && GET_CODE (XEXP (op0
, 1)) == CONST_VECTOR
)
5289 rtx cv
= XEXP (op0
, 1);
5290 int nunits
= CONST_VECTOR_NUNITS (cv
);
5292 for (int i
= 0; i
< nunits
; ++i
)
5293 if (CONST_VECTOR_ELT (cv
, i
) != const0_rtx
)
5300 rtx new_op0
= gen_rtx_NE (GET_MODE (op0
),
5301 XEXP (XEXP (op0
, 0), 0),
5303 rtx retval
= gen_rtx_IF_THEN_ELSE (mode
, new_op0
, op2
, op1
);
5308 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5310 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5311 ? GET_MODE (XEXP (op0
, 1))
5312 : GET_MODE (XEXP (op0
, 0)));
5315 /* Look for happy constants in op1 and op2. */
5316 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5318 HOST_WIDE_INT t
= INTVAL (op1
);
5319 HOST_WIDE_INT f
= INTVAL (op2
);
5321 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5322 code
= GET_CODE (op0
);
5323 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5326 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
5334 return simplify_gen_relational (code
, mode
, cmp_mode
,
5335 XEXP (op0
, 0), XEXP (op0
, 1));
5338 if (cmp_mode
== VOIDmode
)
5339 cmp_mode
= op0_mode
;
5340 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5341 cmp_mode
, XEXP (op0
, 0),
5344 /* See if any simplifications were possible. */
5347 if (CONST_INT_P (temp
))
5348 return temp
== const0_rtx
? op2
: op1
;
5350 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5356 gcc_assert (GET_MODE (op0
) == mode
);
5357 gcc_assert (GET_MODE (op1
) == mode
);
5358 gcc_assert (VECTOR_MODE_P (mode
));
5359 trueop2
= avoid_constant_pool_reference (op2
);
5360 if (CONST_INT_P (trueop2
))
5362 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
5363 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
5364 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
5365 unsigned HOST_WIDE_INT mask
;
5366 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
5369 mask
= ((unsigned HOST_WIDE_INT
) 1 << n_elts
) - 1;
5371 if (!(sel
& mask
) && !side_effects_p (op0
))
5373 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
5376 rtx trueop0
= avoid_constant_pool_reference (op0
);
5377 rtx trueop1
= avoid_constant_pool_reference (op1
);
5378 if (GET_CODE (trueop0
) == CONST_VECTOR
5379 && GET_CODE (trueop1
) == CONST_VECTOR
)
5381 rtvec v
= rtvec_alloc (n_elts
);
5384 for (i
= 0; i
< n_elts
; i
++)
5385 RTVEC_ELT (v
, i
) = ((sel
& ((unsigned HOST_WIDE_INT
) 1 << i
))
5386 ? CONST_VECTOR_ELT (trueop0
, i
)
5387 : CONST_VECTOR_ELT (trueop1
, i
));
5388 return gen_rtx_CONST_VECTOR (mode
, v
);
5391 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5392 if no element from a appears in the result. */
5393 if (GET_CODE (op0
) == VEC_MERGE
)
5395 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
5396 if (CONST_INT_P (tem
))
5398 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
5399 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
5400 return simplify_gen_ternary (code
, mode
, mode
,
5401 XEXP (op0
, 1), op1
, op2
);
5402 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
5403 return simplify_gen_ternary (code
, mode
, mode
,
5404 XEXP (op0
, 0), op1
, op2
);
5407 if (GET_CODE (op1
) == VEC_MERGE
)
5409 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
5410 if (CONST_INT_P (tem
))
5412 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
5413 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
5414 return simplify_gen_ternary (code
, mode
, mode
,
5415 op0
, XEXP (op1
, 1), op2
);
5416 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
5417 return simplify_gen_ternary (code
, mode
, mode
,
5418 op0
, XEXP (op1
, 0), op2
);
      /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	 with a.  */
5424 if (GET_CODE (op0
) == VEC_DUPLICATE
5425 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
5426 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
5427 && mode_nunits
[GET_MODE (XEXP (op0
, 0))] == 1)
5429 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
5430 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
5432 if (XEXP (XEXP (op0
, 0), 0) == op1
5433 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
5439 if (rtx_equal_p (op0
, op1
)
5440 && !side_effects_p (op2
) && !side_effects_p (op1
))
5452 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5453 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5454 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5456 Works by unpacking OP into a collection of 8-bit values
5457 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5458 and then repacking them again for OUTERMODE. */
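/* Added illustrative note (not in the original source): e.g. a SUBREG:HI of
   (const_int 0x12345678) in SImode at byte 2 unpacks the constant into the
   little-endian byte array {0x78, 0x56, 0x34, 0x12}, selects the two bytes
   at the requested offset and repacks them, giving (const_int 0x1234) on a
   little-endian target.  */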
5461 simplify_immed_subreg (machine_mode outermode
, rtx op
,
5462 machine_mode innermode
, unsigned int byte
)
5466 value_mask
= (1 << value_bit
) - 1
5468 unsigned char value
[MAX_BITSIZE_MODE_ANY_MODE
/ value_bit
];
5477 rtvec result_v
= NULL
;
5478 enum mode_class outer_class
;
5479 machine_mode outer_submode
;
5482 /* Some ports misuse CCmode. */
5483 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
5486 /* We have no way to represent a complex constant at the rtl level. */
5487 if (COMPLEX_MODE_P (outermode
))
5490 /* We support any size mode. */
5491 max_bitsize
= MAX (GET_MODE_BITSIZE (outermode
),
5492 GET_MODE_BITSIZE (innermode
));
5494 /* Unpack the value. */
5496 if (GET_CODE (op
) == CONST_VECTOR
)
5498 num_elem
= CONST_VECTOR_NUNITS (op
);
5499 elems
= &CONST_VECTOR_ELT (op
, 0);
5500 elem_bitsize
= GET_MODE_UNIT_BITSIZE (innermode
);
5506 elem_bitsize
= max_bitsize
;
5508 /* If this asserts, it is too complicated; reducing value_bit may help. */
5509 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
5510 /* I don't know how to handle endianness of sub-units. */
5511 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
5513 for (elem
= 0; elem
< num_elem
; elem
++)
5516 rtx el
= elems
[elem
];
	  /* Vectors are kept in target memory order.  (This is probably
	     a mistake.)  */
5521 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5522 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5524 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5525 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5526 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5527 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5528 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5531 switch (GET_CODE (el
))
5535 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5537 *vp
++ = INTVAL (el
) >> i
;
5538 /* CONST_INTs are always logically sign-extended. */
5539 for (; i
< elem_bitsize
; i
+= value_bit
)
5540 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
5543 case CONST_WIDE_INT
:
5545 rtx_mode_t val
= std::make_pair (el
, innermode
);
5546 unsigned char extend
= wi::sign_mask (val
);
5548 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5549 *vp
++ = wi::extract_uhwi (val
, i
, value_bit
);
5550 for (; i
< elem_bitsize
; i
+= value_bit
)
5556 if (TARGET_SUPPORTS_WIDE_INT
== 0 && GET_MODE (el
) == VOIDmode
)
5558 unsigned char extend
= 0;
5559 /* If this triggers, someone should have generated a
5560 CONST_INT instead. */
5561 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
5563 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5564 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
5565 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
5568 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
5572 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
5574 for (; i
< elem_bitsize
; i
+= value_bit
)
5579 /* This is big enough for anything on the platform. */
5580 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
5581 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
5583 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
5584 gcc_assert (bitsize
<= elem_bitsize
);
5585 gcc_assert (bitsize
% value_bit
== 0);
5587 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
5590 /* real_to_target produces its result in words affected by
5591 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5592 and use WORDS_BIG_ENDIAN instead; see the documentation
5593 of SUBREG in rtl.texi. */
5594 for (i
= 0; i
< bitsize
; i
+= value_bit
)
5597 if (WORDS_BIG_ENDIAN
)
5598 ibase
= bitsize
- 1 - i
;
5601 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
	  /* It shouldn't matter what's done here, so fill it with
	     zero.  */
5606 for (; i
< elem_bitsize
; i
+= value_bit
)
5612 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5614 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5615 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5619 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5620 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5621 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
5623 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
5624 >> (i
- HOST_BITS_PER_WIDE_INT
);
5625 for (; i
< elem_bitsize
; i
+= value_bit
)
5635 /* Now, pick the right byte to start with. */
5636 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5637 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5638 will already have offset 0. */
5639 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
5641 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
5643 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5644 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5645 byte
= (subword_byte
% UNITS_PER_WORD
5646 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5649 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5650 so if it's become negative it will instead be very large.) */
5651 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5653 /* Convert from bytes to chunks of size value_bit. */
5654 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
5656 /* Re-pack the value. */
5657 num_elem
= GET_MODE_NUNITS (outermode
);
5659 if (VECTOR_MODE_P (outermode
))
5661 result_v
= rtvec_alloc (num_elem
);
5662 elems
= &RTVEC_ELT (result_v
, 0);
5667 outer_submode
= GET_MODE_INNER (outermode
);
5668 outer_class
= GET_MODE_CLASS (outer_submode
);
5669 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
5671 gcc_assert (elem_bitsize
% value_bit
== 0);
5672 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
5674 for (elem
= 0; elem
< num_elem
; elem
++)
      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
5681 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5682 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5684 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5685 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5686 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5687 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5688 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5691 switch (outer_class
)
5694 case MODE_PARTIAL_INT
:
5699 = (GET_MODE_BITSIZE (outer_submode
) + HOST_BITS_PER_WIDE_INT
- 1)
5700 / HOST_BITS_PER_WIDE_INT
;
5701 HOST_WIDE_INT tmp
[MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_WIDE_INT
];
5704 if (GET_MODE_PRECISION (outer_submode
) > MAX_BITSIZE_MODE_ANY_INT
)
5706 for (u
= 0; u
< units
; u
++)
5708 unsigned HOST_WIDE_INT buf
= 0;
5710 i
< HOST_BITS_PER_WIDE_INT
&& base
+ i
< elem_bitsize
;
5712 buf
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5715 base
+= HOST_BITS_PER_WIDE_INT
;
5717 r
= wide_int::from_array (tmp
, units
,
5718 GET_MODE_PRECISION (outer_submode
));
5719 #if TARGET_SUPPORTS_WIDE_INT == 0
5720 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5721 if (wi::min_precision (r
, SIGNED
) > HOST_BITS_PER_DOUBLE_INT
)
5724 elems
[elem
] = immed_wide_int_const (r
, outer_submode
);
5729 case MODE_DECIMAL_FLOAT
:
5732 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
5734 /* real_from_target wants its input in words affected by
5735 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5736 and use WORDS_BIG_ENDIAN instead; see the documentation
5737 of SUBREG in rtl.texi. */
5738 for (i
= 0; i
< max_bitsize
/ 32; i
++)
5740 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5743 if (WORDS_BIG_ENDIAN
)
5744 ibase
= elem_bitsize
- 1 - i
;
5747 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
5750 real_from_target (&r
, tmp
, outer_submode
);
5751 elems
[elem
] = const_double_from_real_value (r
, outer_submode
);
5763 f
.mode
= outer_submode
;
5766 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5768 f
.data
.low
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5769 for (; i
< elem_bitsize
; i
+= value_bit
)
5770 f
.data
.high
|= ((unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
5771 << (i
- HOST_BITS_PER_WIDE_INT
));
5773 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
5781 if (VECTOR_MODE_P (outermode
))
5782 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
5787 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5788 Return 0 if no simplifications are possible. */
5790 simplify_subreg (machine_mode outermode
, rtx op
,
5791 machine_mode innermode
, unsigned int byte
)
5793 /* Little bit of sanity checking. */
5794 gcc_assert (innermode
!= VOIDmode
);
5795 gcc_assert (outermode
!= VOIDmode
);
5796 gcc_assert (innermode
!= BLKmode
);
5797 gcc_assert (outermode
!= BLKmode
);
5799 gcc_assert (GET_MODE (op
) == innermode
5800 || GET_MODE (op
) == VOIDmode
);
5802 if ((byte
% GET_MODE_SIZE (outermode
)) != 0)
5805 if (byte
>= GET_MODE_SIZE (innermode
))
5808 if (outermode
== innermode
&& !byte
)
5811 if (CONST_SCALAR_INT_P (op
)
5812 || CONST_DOUBLE_AS_FLOAT_P (op
)
5813 || GET_CODE (op
) == CONST_FIXED
5814 || GET_CODE (op
) == CONST_VECTOR
)
5815 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
5819 if (GET_CODE (op
) == SUBREG
)
5821 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
5822 int final_offset
= byte
+ SUBREG_BYTE (op
);
5825 if (outermode
== innermostmode
5826 && byte
== 0 && SUBREG_BYTE (op
) == 0)
5827 return SUBREG_REG (op
);
      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is a paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
5833 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5835 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
5836 if (WORDS_BIG_ENDIAN
)
5837 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5838 if (BYTES_BIG_ENDIAN
)
5839 final_offset
+= difference
% UNITS_PER_WORD
;
5841 if (SUBREG_BYTE (op
) == 0
5842 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
5844 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
5845 if (WORDS_BIG_ENDIAN
)
5846 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5847 if (BYTES_BIG_ENDIAN
)
5848 final_offset
+= difference
% UNITS_PER_WORD
;
5851 /* See whether resulting subreg will be paradoxical. */
5852 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
5854 /* In nonparadoxical subregs we can't handle negative offsets. */
5855 if (final_offset
< 0)
5857 /* Bail out in case resulting subreg would be incorrect. */
5858 if (final_offset
% GET_MODE_SIZE (outermode
)
5859 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
5865 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
5869 if (WORDS_BIG_ENDIAN
)
5870 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5871 if (BYTES_BIG_ENDIAN
)
5872 offset
+= difference
% UNITS_PER_WORD
;
5873 if (offset
== final_offset
)
5879 /* Recurse for further possible simplifications. */
5880 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
5884 if (validate_subreg (outermode
, innermostmode
,
5885 SUBREG_REG (op
), final_offset
))
5887 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
5888 if (SUBREG_PROMOTED_VAR_P (op
)
5889 && SUBREG_PROMOTED_SIGN (op
) >= 0
5890 && GET_MODE_CLASS (outermode
) == MODE_INT
5891 && IN_RANGE (GET_MODE_SIZE (outermode
),
5892 GET_MODE_SIZE (innermode
),
5893 GET_MODE_SIZE (innermostmode
))
5894 && subreg_lowpart_p (newx
))
5896 SUBREG_PROMOTED_VAR_P (newx
) = 1;
5897 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
5904 /* SUBREG of a hard register => just change the register number
5905 and/or mode. If the hard register is not valid in that mode,
5906 suppress this simplification. If the hard register is the stack,
5907 frame, or argument pointer, leave this as a SUBREG. */
5909 if (REG_P (op
) && HARD_REGISTER_P (op
))
5911 unsigned int regno
, final_regno
;
5914 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
5915 if (HARD_REGISTER_NUM_P (final_regno
))
5918 int final_offset
= byte
;
5920 /* Adjust offset for paradoxical subregs. */
5922 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5924 int difference
= (GET_MODE_SIZE (innermode
)
5925 - GET_MODE_SIZE (outermode
));
5926 if (WORDS_BIG_ENDIAN
)
5927 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5928 if (BYTES_BIG_ENDIAN
)
5929 final_offset
+= difference
% UNITS_PER_WORD
;
5932 x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, final_offset
);
	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */
5939 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
5940 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
5945 /* If we have a SUBREG of a register that we are replacing and we are
5946 replacing it with a MEM, make a new MEM and try replacing the
5947 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5948 or if we would be widening it. */
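  /* Added illustrative note (not in the original source): e.g.
     (subreg:SI (mem:DI addr) 4) becomes a narrower (mem:SI ...) at byte
     offset 4 from the original address, which is why mode-dependent
     addresses and widening subregs are excluded by the conditions below.
     "addr" is a placeholder address.  */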
5951 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
5954 && (! MEM_VOLATILE_P (op
)
5955 || ! have_insn_for (SET
, innermode
))
5956 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
5957 return adjust_address_nv (op
, outermode
, byte
);
5959 /* Handle complex values represented as CONCAT
5960 of real and imaginary part. */
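  /* Added illustrative note (not in the original source): e.g. for
     (concat:SC re:SF im:SF), a (subreg:SF ... 0) selects RE and a
     (subreg:SF ... 4) selects IM (assuming 4-byte SFmode), by recursing on
     the matching CONCAT operand with the byte offset rebased.  */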
5961 if (GET_CODE (op
) == CONCAT
)
5963 unsigned int part_size
, final_offset
;
5966 part_size
= GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)));
5967 if (byte
< part_size
)
5969 part
= XEXP (op
, 0);
5970 final_offset
= byte
;
5974 part
= XEXP (op
, 1);
5975 final_offset
= byte
- part_size
;
5978 if (final_offset
+ GET_MODE_SIZE (outermode
) > part_size
)
5981 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
5984 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
5985 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
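  /* Added illustrative note (not in the original source): e.g. on a
     little-endian target, (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads
     only bits that the ZERO_EXTEND guarantees are zero, so it folds to
     (const_int 0).  */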
5991 if (GET_CODE (op
) == ZERO_EXTEND
&& SCALAR_INT_MODE_P (innermode
))
5993 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
5994 if (bitpos
>= GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0))))
5995 return CONST0_RTX (outermode
);
5998 if (SCALAR_INT_MODE_P (outermode
)
5999 && SCALAR_INT_MODE_P (innermode
)
6000 && GET_MODE_PRECISION (outermode
) < GET_MODE_PRECISION (innermode
)
6001 && byte
== subreg_lowpart_offset (outermode
, innermode
))
6003 rtx tem
= simplify_truncation (outermode
, op
, innermode
);
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   unnecessary/dead code.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
6086 simplify_rtx (const_rtx x
)
6088 const enum rtx_code code
= GET_CODE (x
);
6089 const machine_mode mode
= GET_MODE (x
);
6091 switch (GET_RTX_CLASS (code
))
6094 return simplify_unary_operation (code
, mode
,
6095 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
6096 case RTX_COMM_ARITH
:
6097 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
6098 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
6100 /* Fall through.... */
6103 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
6106 case RTX_BITFIELD_OPS
:
6107 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
6108 XEXP (x
, 0), XEXP (x
, 1),
6112 case RTX_COMM_COMPARE
:
6113 return simplify_relational_operation (code
, mode
,
6114 ((GET_MODE (XEXP (x
, 0))
6116 ? GET_MODE (XEXP (x
, 0))
6117 : GET_MODE (XEXP (x
, 1))),
6123 return simplify_subreg (mode
, SUBREG_REG (x
),
6124 GET_MODE (SUBREG_REG (x
)),
6131 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6132 if (GET_CODE (XEXP (x
, 0)) == HIGH
6133 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))