/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
40 #include "insn-codes.h"
43 #include "diagnostic-core.h"
48 /* Simplification and canonicalization of RTL. */
50 /* Much code operates on (low, high) pairs; the low value is an
51 unsigned wide int, the high value a signed wide int. We
52 occasionally need to sign extend from low to high as if low were a
54 #define HWI_SIGN_EXTEND(low) \
55 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
57 static rtx
neg_const_int (machine_mode
, const_rtx
);
58 static bool plus_minus_operand_p (const_rtx
);
59 static bool simplify_plus_minus_op_data_cmp (rtx
, rtx
);
60 static rtx
simplify_plus_minus (enum rtx_code
, machine_mode
, rtx
, rtx
);
61 static rtx
simplify_immed_subreg (machine_mode
, rtx
, machine_mode
,
63 static rtx
simplify_associative_operation (enum rtx_code
, machine_mode
,
65 static rtx
simplify_relational_operation_1 (enum rtx_code
, machine_mode
,
66 machine_mode
, rtx
, rtx
);
67 static rtx
simplify_unary_operation_1 (enum rtx_code
, machine_mode
, rtx
);
68 static rtx
simplify_binary_operation_1 (enum rtx_code
, machine_mode
,
71 /* Negate a CONST_INT rtx, truncating (because a conversion from a
72 maximally negative number can overflow). */
74 neg_const_int (machine_mode mode
, const_rtx i
)
76 return gen_int_mode (-(unsigned HOST_WIDE_INT
) INTVAL (i
), mode
);
79 /* Test whether expression, X, is an immediate constant that represents
80 the most significant bit of machine mode MODE. */
83 mode_signbit_p (machine_mode mode
, const_rtx x
)
85 unsigned HOST_WIDE_INT val
;
88 if (GET_MODE_CLASS (mode
) != MODE_INT
)
91 width
= GET_MODE_PRECISION (mode
);
95 if (width
<= HOST_BITS_PER_WIDE_INT
98 #if TARGET_SUPPORTS_WIDE_INT
99 else if (CONST_WIDE_INT_P (x
))
102 unsigned int elts
= CONST_WIDE_INT_NUNITS (x
);
103 if (elts
!= (width
+ HOST_BITS_PER_WIDE_INT
- 1) / HOST_BITS_PER_WIDE_INT
)
105 for (i
= 0; i
< elts
- 1; i
++)
106 if (CONST_WIDE_INT_ELT (x
, i
) != 0)
108 val
= CONST_WIDE_INT_ELT (x
, elts
- 1);
109 width
%= HOST_BITS_PER_WIDE_INT
;
111 width
= HOST_BITS_PER_WIDE_INT
;
114 else if (width
<= HOST_BITS_PER_DOUBLE_INT
115 && CONST_DOUBLE_AS_INT_P (x
)
116 && CONST_DOUBLE_LOW (x
) == 0)
118 val
= CONST_DOUBLE_HIGH (x
);
119 width
-= HOST_BITS_PER_WIDE_INT
;
123 /* X is not an integer constant. */
126 if (width
< HOST_BITS_PER_WIDE_INT
)
127 val
&= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
128 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
131 /* Test whether VAL is equal to the most significant bit of mode MODE
132 (after masking with the mode mask of MODE). Returns false if the
133 precision of MODE is too large to handle. */
136 val_signbit_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
140 if (GET_MODE_CLASS (mode
) != MODE_INT
)
143 width
= GET_MODE_PRECISION (mode
);
144 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
147 val
&= GET_MODE_MASK (mode
);
148 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
151 /* Test whether the most significant bit of mode MODE is set in VAL.
152 Returns false if the precision of MODE is too large to handle. */
154 val_signbit_known_set_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
158 if (GET_MODE_CLASS (mode
) != MODE_INT
)
161 width
= GET_MODE_PRECISION (mode
);
162 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
165 val
&= (unsigned HOST_WIDE_INT
) 1 << (width
- 1);
169 /* Test whether the most significant bit of mode MODE is clear in VAL.
170 Returns false if the precision of MODE is too large to handle. */
172 val_signbit_known_clear_p (machine_mode mode
, unsigned HOST_WIDE_INT val
)
176 if (GET_MODE_CLASS (mode
) != MODE_INT
)
179 width
= GET_MODE_PRECISION (mode
);
180 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
183 val
&= (unsigned HOST_WIDE_INT
) 1 << (width
- 1);
187 /* Make a binary operation by properly ordering the operands and
188 seeing if the expression folds. */
191 simplify_gen_binary (enum rtx_code code
, machine_mode mode
, rtx op0
,
196 /* If this simplifies, do it. */
197 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
201 /* Put complex operands first and constants second if commutative. */
202 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
203 && swap_commutative_operands_p (op0
, op1
))
204 tem
= op0
, op0
= op1
, op1
= tem
;
206 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
209 /* If X is a MEM referencing the constant pool, return the real value.
210 Otherwise return X. */
212 avoid_constant_pool_reference (rtx x
)
216 HOST_WIDE_INT offset
= 0;
218 switch (GET_CODE (x
))
224 /* Handle float extensions of constant pool references. */
226 c
= avoid_constant_pool_reference (tmp
);
227 if (c
!= tmp
&& CONST_DOUBLE_AS_FLOAT_P (c
))
231 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
232 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
240 if (GET_MODE (x
) == BLKmode
)
245 /* Call target hook to avoid the effects of -fpic etc.... */
246 addr
= targetm
.delegitimize_address (addr
);
248 /* Split the address into a base and integer offset. */
249 if (GET_CODE (addr
) == CONST
250 && GET_CODE (XEXP (addr
, 0)) == PLUS
251 && CONST_INT_P (XEXP (XEXP (addr
, 0), 1)))
253 offset
= INTVAL (XEXP (XEXP (addr
, 0), 1));
254 addr
= XEXP (XEXP (addr
, 0), 0);
257 if (GET_CODE (addr
) == LO_SUM
)
258 addr
= XEXP (addr
, 1);
260 /* If this is a constant pool reference, we can turn it into its
261 constant and hope that simplifications happen. */
262 if (GET_CODE (addr
) == SYMBOL_REF
263 && CONSTANT_POOL_ADDRESS_P (addr
))
265 c
= get_pool_constant (addr
);
266 cmode
= get_pool_mode (addr
);
268 /* If we're accessing the constant in a different mode than it was
269 originally stored, attempt to fix that up via subreg simplifications.
270 If that fails we have no choice but to return the original memory. */
271 if ((offset
!= 0 || cmode
!= GET_MODE (x
))
272 && offset
>= 0 && offset
< GET_MODE_SIZE (cmode
))
274 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
275 if (tem
&& CONSTANT_P (tem
))
285 /* Simplify a MEM based on its attributes. This is the default
286 delegitimize_address target hook, and it's recommended that every
287 overrider call it. */
290 delegitimize_mem_from_attrs (rtx x
)
292 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
293 use their base addresses as equivalent. */
296 && MEM_OFFSET_KNOWN_P (x
))
298 tree decl
= MEM_EXPR (x
);
299 machine_mode mode
= GET_MODE (x
);
300 HOST_WIDE_INT offset
= 0;
302 switch (TREE_CODE (decl
))
312 case ARRAY_RANGE_REF
:
317 case VIEW_CONVERT_EXPR
:
319 HOST_WIDE_INT bitsize
, bitpos
;
321 int unsignedp
, volatilep
= 0;
323 decl
= get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
,
324 &mode
, &unsignedp
, &volatilep
, false);
325 if (bitsize
!= GET_MODE_BITSIZE (mode
)
326 || (bitpos
% BITS_PER_UNIT
)
327 || (toffset
&& !tree_fits_shwi_p (toffset
)))
331 offset
+= bitpos
/ BITS_PER_UNIT
;
333 offset
+= tree_to_shwi (toffset
);
340 && mode
== GET_MODE (x
)
341 && TREE_CODE (decl
) == VAR_DECL
342 && (TREE_STATIC (decl
)
343 || DECL_THREAD_LOCAL_P (decl
))
344 && DECL_RTL_SET_P (decl
)
345 && MEM_P (DECL_RTL (decl
)))
349 offset
+= MEM_OFFSET (x
);
351 newx
= DECL_RTL (decl
);
355 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
357 /* Avoid creating a new MEM needlessly if we already had
358 the same address. We do if there's no OFFSET and the
359 old address X is identical to NEWX, or if X is of the
360 form (plus NEWX OFFSET), or the NEWX is of the form
361 (plus Y (const_int Z)) and X is that with the offset
362 added: (plus Y (const_int Z+OFFSET)). */
364 || (GET_CODE (o
) == PLUS
365 && GET_CODE (XEXP (o
, 1)) == CONST_INT
366 && (offset
== INTVAL (XEXP (o
, 1))
367 || (GET_CODE (n
) == PLUS
368 && GET_CODE (XEXP (n
, 1)) == CONST_INT
369 && (INTVAL (XEXP (n
, 1)) + offset
370 == INTVAL (XEXP (o
, 1)))
371 && (n
= XEXP (n
, 0))))
372 && (o
= XEXP (o
, 0))))
373 && rtx_equal_p (o
, n
)))
374 x
= adjust_address_nv (newx
, mode
, offset
);
376 else if (GET_MODE (x
) == GET_MODE (newx
)
385 /* Make a unary operation by first seeing if it folds and otherwise making
386 the specified operation. */
389 simplify_gen_unary (enum rtx_code code
, machine_mode mode
, rtx op
,
390 machine_mode op_mode
)
394 /* If this simplifies, use it. */
395 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
398 return gen_rtx_fmt_e (code
, mode
, op
);
401 /* Likewise for ternary operations. */
404 simplify_gen_ternary (enum rtx_code code
, machine_mode mode
,
405 machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
409 /* If this simplifies, use it. */
410 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
414 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
417 /* Likewise, for relational operations.
418 CMP_MODE specifies mode comparison is done in. */
421 simplify_gen_relational (enum rtx_code code
, machine_mode mode
,
422 machine_mode cmp_mode
, rtx op0
, rtx op1
)
426 if (0 != (tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
430 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
433 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
434 and simplify the result. If FN is non-NULL, call this callback on each
435 X, if it returns non-NULL, replace X with its return value and simplify the
439 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
440 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
442 enum rtx_code code
= GET_CODE (x
);
443 machine_mode mode
= GET_MODE (x
);
444 machine_mode op_mode
;
446 rtx op0
, op1
, op2
, newx
, op
;
450 if (__builtin_expect (fn
!= NULL
, 0))
452 newx
= fn (x
, old_rtx
, data
);
456 else if (rtx_equal_p (x
, old_rtx
))
457 return copy_rtx ((rtx
) data
);
459 switch (GET_RTX_CLASS (code
))
463 op_mode
= GET_MODE (op0
);
464 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
465 if (op0
== XEXP (x
, 0))
467 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
471 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
472 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
473 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
475 return simplify_gen_binary (code
, mode
, op0
, op1
);
478 case RTX_COMM_COMPARE
:
481 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
482 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
483 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
484 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
486 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
489 case RTX_BITFIELD_OPS
:
491 op_mode
= GET_MODE (op0
);
492 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
493 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
494 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
495 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
497 if (op_mode
== VOIDmode
)
498 op_mode
= GET_MODE (op0
);
499 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
504 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
505 if (op0
== SUBREG_REG (x
))
507 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
508 GET_MODE (SUBREG_REG (x
)),
510 return op0
? op0
: x
;
517 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
518 if (op0
== XEXP (x
, 0))
520 return replace_equiv_address_nv (x
, op0
);
522 else if (code
== LO_SUM
)
524 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
525 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
527 /* (lo_sum (high x) x) -> x */
528 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
531 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
533 return gen_rtx_LO_SUM (mode
, op0
, op1
);
542 fmt
= GET_RTX_FORMAT (code
);
543 for (i
= 0; fmt
[i
]; i
++)
548 newvec
= XVEC (newx
, i
);
549 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
551 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
553 if (op
!= RTVEC_ELT (vec
, j
))
557 newvec
= shallow_copy_rtvec (vec
);
559 newx
= shallow_copy_rtx (x
);
560 XVEC (newx
, i
) = newvec
;
562 RTVEC_ELT (newvec
, j
) = op
;
570 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
571 if (op
!= XEXP (x
, i
))
574 newx
= shallow_copy_rtx (x
);
583 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
584 resulting RTX. Return a new RTX which is as simplified as possible. */
587 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
589 return simplify_replace_fn_rtx (x
, old_rtx
, 0, new_rtx
);
592 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
593 Only handle cases where the truncated value is inherently an rvalue.
595 RTL provides two ways of truncating a value:
597 1. a lowpart subreg. This form is only a truncation when both
598 the outer and inner modes (here MODE and OP_MODE respectively)
599 are scalar integers, and only then when the subreg is used as
602 It is only valid to form such truncating subregs if the
603 truncation requires no action by the target. The onus for
604 proving this is on the creator of the subreg -- e.g. the
605 caller to simplify_subreg or simplify_gen_subreg -- and typically
606 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
608 2. a TRUNCATE. This form handles both scalar and compound integers.
610 The first form is preferred where valid. However, the TRUNCATE
611 handling in simplify_unary_operation turns the second form into the
612 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
613 so it is generally safe to form rvalue truncations using:
615 simplify_gen_unary (TRUNCATE, ...)
617 and leave simplify_unary_operation to work out which representation
620 Because of the proof requirements on (1), simplify_truncation must
621 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
622 regardless of whether the outer truncation came from a SUBREG or a
623 TRUNCATE. For example, if the caller has proven that an SImode
628 is a no-op and can be represented as a subreg, it does not follow
629 that SImode truncations of X and Y are also no-ops. On a target
630 like 64-bit MIPS that requires SImode values to be stored in
631 sign-extended form, an SImode truncation of:
633 (and:DI (reg:DI X) (const_int 63))
635 is trivially a no-op because only the lower 6 bits can be set.
636 However, X is still an arbitrary 64-bit number and so we cannot
637 assume that truncating it too is a no-op. */
640 simplify_truncation (machine_mode mode
, rtx op
,
641 machine_mode op_mode
)
643 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
644 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
645 gcc_assert (precision
<= op_precision
);
647 /* Optimize truncations of zero and sign extended values. */
648 if (GET_CODE (op
) == ZERO_EXTEND
649 || GET_CODE (op
) == SIGN_EXTEND
)
651 /* There are three possibilities. If MODE is the same as the
652 origmode, we can omit both the extension and the subreg.
653 If MODE is not larger than the origmode, we can apply the
654 truncation without the extension. Finally, if the outermode
655 is larger than the origmode, we can just extend to the appropriate
657 machine_mode origmode
= GET_MODE (XEXP (op
, 0));
658 if (mode
== origmode
)
660 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
661 return simplify_gen_unary (TRUNCATE
, mode
,
662 XEXP (op
, 0), origmode
);
664 return simplify_gen_unary (GET_CODE (op
), mode
,
665 XEXP (op
, 0), origmode
);
668 /* If the machine can perform operations in the truncated mode, distribute
669 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
670 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
672 #ifdef WORD_REGISTER_OPERATIONS
673 && precision
>= BITS_PER_WORD
675 && (GET_CODE (op
) == PLUS
676 || GET_CODE (op
) == MINUS
677 || GET_CODE (op
) == MULT
))
679 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
682 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
684 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
688 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
689 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
690 the outer subreg is effectively a truncation to the original mode. */
691 if ((GET_CODE (op
) == LSHIFTRT
692 || GET_CODE (op
) == ASHIFTRT
)
693 /* Ensure that OP_MODE is at least twice as wide as MODE
694 to avoid the possibility that an outer LSHIFTRT shifts by more
695 than the sign extension's sign_bit_copies and introduces zeros
696 into the high bits of the result. */
697 && 2 * precision
<= op_precision
698 && CONST_INT_P (XEXP (op
, 1))
699 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
700 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
701 && UINTVAL (XEXP (op
, 1)) < precision
)
702 return simplify_gen_binary (ASHIFTRT
, mode
,
703 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
705 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
706 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
707 the outer subreg is effectively a truncation to the original mode. */
708 if ((GET_CODE (op
) == LSHIFTRT
709 || GET_CODE (op
) == ASHIFTRT
)
710 && CONST_INT_P (XEXP (op
, 1))
711 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
712 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
713 && UINTVAL (XEXP (op
, 1)) < precision
)
714 return simplify_gen_binary (LSHIFTRT
, mode
,
715 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
717 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
718 to (ashift:QI (x:QI) C), where C is a suitable small constant and
719 the outer subreg is effectively a truncation to the original mode. */
720 if (GET_CODE (op
) == ASHIFT
721 && CONST_INT_P (XEXP (op
, 1))
722 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
723 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
724 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
725 && UINTVAL (XEXP (op
, 1)) < precision
)
726 return simplify_gen_binary (ASHIFT
, mode
,
727 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
729 /* Recognize a word extraction from a multi-word subreg. */
730 if ((GET_CODE (op
) == LSHIFTRT
731 || GET_CODE (op
) == ASHIFTRT
)
732 && SCALAR_INT_MODE_P (mode
)
733 && SCALAR_INT_MODE_P (op_mode
)
734 && precision
>= BITS_PER_WORD
735 && 2 * precision
<= op_precision
736 && CONST_INT_P (XEXP (op
, 1))
737 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
738 && UINTVAL (XEXP (op
, 1)) < op_precision
)
740 int byte
= subreg_lowpart_offset (mode
, op_mode
);
741 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
742 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
744 ? byte
- shifted_bytes
745 : byte
+ shifted_bytes
));
748 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
749 and try replacing the TRUNCATE and shift with it. Don't do this
750 if the MEM has a mode-dependent address. */
751 if ((GET_CODE (op
) == LSHIFTRT
752 || GET_CODE (op
) == ASHIFTRT
)
753 && SCALAR_INT_MODE_P (op_mode
)
754 && MEM_P (XEXP (op
, 0))
755 && CONST_INT_P (XEXP (op
, 1))
756 && (INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (mode
)) == 0
757 && INTVAL (XEXP (op
, 1)) > 0
758 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (op_mode
)
759 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
760 MEM_ADDR_SPACE (XEXP (op
, 0)))
761 && ! MEM_VOLATILE_P (XEXP (op
, 0))
762 && (GET_MODE_SIZE (mode
) >= UNITS_PER_WORD
763 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
765 int byte
= subreg_lowpart_offset (mode
, op_mode
);
766 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
767 return adjust_address_nv (XEXP (op
, 0), mode
,
769 ? byte
- shifted_bytes
770 : byte
+ shifted_bytes
));
773 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
774 (OP:SI foo:SI) if OP is NEG or ABS. */
775 if ((GET_CODE (op
) == ABS
776 || GET_CODE (op
) == NEG
)
777 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
778 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
779 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
780 return simplify_gen_unary (GET_CODE (op
), mode
,
781 XEXP (XEXP (op
, 0), 0), mode
);
783 /* (truncate:A (subreg:B (truncate:C X) 0)) is
785 if (GET_CODE (op
) == SUBREG
786 && SCALAR_INT_MODE_P (mode
)
787 && SCALAR_INT_MODE_P (op_mode
)
788 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op
)))
789 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
790 && subreg_lowpart_p (op
))
792 rtx inner
= XEXP (SUBREG_REG (op
), 0);
793 if (GET_MODE_PRECISION (mode
)
794 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
))))
795 return simplify_gen_unary (TRUNCATE
, mode
, inner
, GET_MODE (inner
));
797 /* If subreg above is paradoxical and C is narrower
798 than A, return (subreg:A (truncate:C X) 0). */
799 return simplify_gen_subreg (mode
, SUBREG_REG (op
),
800 GET_MODE (SUBREG_REG (op
)), 0);
803 /* (truncate:A (truncate:B X)) is (truncate:A X). */
804 if (GET_CODE (op
) == TRUNCATE
)
805 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
806 GET_MODE (XEXP (op
, 0)));
811 /* Try to simplify a unary operation CODE whose output mode is to be
812 MODE with input operand OP whose mode was originally OP_MODE.
813 Return zero if no simplification can be made. */
815 simplify_unary_operation (enum rtx_code code
, machine_mode mode
,
816 rtx op
, machine_mode op_mode
)
820 trueop
= avoid_constant_pool_reference (op
);
822 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
826 return simplify_unary_operation_1 (code
, mode
, op
);
829 /* Perform some simplifications we can do even if the operands
832 simplify_unary_operation_1 (enum rtx_code code
, machine_mode mode
, rtx op
)
834 enum rtx_code reversed
;
840 /* (not (not X)) == X. */
841 if (GET_CODE (op
) == NOT
)
844 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
845 comparison is all ones. */
846 if (COMPARISON_P (op
)
847 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
848 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
849 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
850 XEXP (op
, 0), XEXP (op
, 1));
852 /* (not (plus X -1)) can become (neg X). */
853 if (GET_CODE (op
) == PLUS
854 && XEXP (op
, 1) == constm1_rtx
)
855 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
857 /* Similarly, (not (neg X)) is (plus X -1). */
858 if (GET_CODE (op
) == NEG
)
859 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
862 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
863 if (GET_CODE (op
) == XOR
864 && CONST_INT_P (XEXP (op
, 1))
865 && (temp
= simplify_unary_operation (NOT
, mode
,
866 XEXP (op
, 1), mode
)) != 0)
867 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
869 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
870 if (GET_CODE (op
) == PLUS
871 && CONST_INT_P (XEXP (op
, 1))
872 && mode_signbit_p (mode
, XEXP (op
, 1))
873 && (temp
= simplify_unary_operation (NOT
, mode
,
874 XEXP (op
, 1), mode
)) != 0)
875 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
878 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
879 operands other than 1, but that is not valid. We could do a
880 similar simplification for (not (lshiftrt C X)) where C is
881 just the sign bit, but this doesn't seem common enough to
883 if (GET_CODE (op
) == ASHIFT
884 && XEXP (op
, 0) == const1_rtx
)
886 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
887 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
890 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
891 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
892 so we can perform the above simplification. */
893 if (STORE_FLAG_VALUE
== -1
894 && GET_CODE (op
) == ASHIFTRT
895 && CONST_INT_P (XEXP (op
, 1))
896 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
897 return simplify_gen_relational (GE
, mode
, VOIDmode
,
898 XEXP (op
, 0), const0_rtx
);
901 if (GET_CODE (op
) == SUBREG
902 && subreg_lowpart_p (op
)
903 && (GET_MODE_SIZE (GET_MODE (op
))
904 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
905 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
906 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
908 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
911 x
= gen_rtx_ROTATE (inner_mode
,
912 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
914 XEXP (SUBREG_REG (op
), 1));
915 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
920 /* Apply De Morgan's laws to reduce number of patterns for machines
921 with negating logical insns (and-not, nand, etc.). If result has
922 only one NOT, put it first, since that is how the patterns are
924 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
926 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
927 machine_mode op_mode
;
929 op_mode
= GET_MODE (in1
);
930 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
932 op_mode
= GET_MODE (in2
);
933 if (op_mode
== VOIDmode
)
935 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
937 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
940 in2
= in1
; in1
= tem
;
943 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
947 /* (not (bswap x)) -> (bswap (not x)). */
948 if (GET_CODE (op
) == BSWAP
)
950 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
951 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
956 /* (neg (neg X)) == X. */
957 if (GET_CODE (op
) == NEG
)
960 /* (neg (plus X 1)) can become (not X). */
961 if (GET_CODE (op
) == PLUS
962 && XEXP (op
, 1) == const1_rtx
)
963 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
965 /* Similarly, (neg (not X)) is (plus X 1). */
966 if (GET_CODE (op
) == NOT
)
967 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
970 /* (neg (minus X Y)) can become (minus Y X). This transformation
971 isn't safe for modes with signed zeros, since if X and Y are
972 both +0, (minus Y X) is the same as (minus X Y). If the
973 rounding mode is towards +infinity (or -infinity) then the two
974 expressions will be rounded differently. */
975 if (GET_CODE (op
) == MINUS
976 && !HONOR_SIGNED_ZEROS (mode
)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
978 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
980 if (GET_CODE (op
) == PLUS
981 && !HONOR_SIGNED_ZEROS (mode
)
982 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
984 /* (neg (plus A C)) is simplified to (minus -C A). */
985 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
986 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
988 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
990 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
993 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
994 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
995 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
998 /* (neg (mult A B)) becomes (mult A (neg B)).
999 This works even for floating-point values. */
1000 if (GET_CODE (op
) == MULT
1001 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1003 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1004 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1007 /* NEG commutes with ASHIFT since it is multiplication. Only do
1008 this if we can then eliminate the NEG (e.g., if the operand
1010 if (GET_CODE (op
) == ASHIFT
)
1012 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1014 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1017 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op
) == ASHIFTRT
1020 && CONST_INT_P (XEXP (op
, 1))
1021 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
1022 return simplify_gen_binary (LSHIFTRT
, mode
,
1023 XEXP (op
, 0), XEXP (op
, 1));
1025 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1026 C is equal to the width of MODE minus 1. */
1027 if (GET_CODE (op
) == LSHIFTRT
1028 && CONST_INT_P (XEXP (op
, 1))
1029 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
1030 return simplify_gen_binary (ASHIFTRT
, mode
,
1031 XEXP (op
, 0), XEXP (op
, 1));
1033 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1034 if (GET_CODE (op
) == XOR
1035 && XEXP (op
, 1) == const1_rtx
1036 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1037 return plus_constant (mode
, XEXP (op
, 0), -1);
1039 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1040 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1041 if (GET_CODE (op
) == LT
1042 && XEXP (op
, 1) == const0_rtx
1043 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
1045 machine_mode inner
= GET_MODE (XEXP (op
, 0));
1046 int isize
= GET_MODE_PRECISION (inner
);
1047 if (STORE_FLAG_VALUE
== 1)
1049 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1050 GEN_INT (isize
- 1));
1053 if (GET_MODE_PRECISION (mode
) > isize
)
1054 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
1055 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1057 else if (STORE_FLAG_VALUE
== -1)
1059 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1060 GEN_INT (isize
- 1));
1063 if (GET_MODE_PRECISION (mode
) > isize
)
1064 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
1065 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1071 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1072 with the umulXi3_highpart patterns. */
1073 if (GET_CODE (op
) == LSHIFTRT
1074 && GET_CODE (XEXP (op
, 0)) == MULT
)
1077 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1079 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1081 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1085 /* We can't handle truncation to a partial integer mode here
1086 because we don't know the real bitsize of the partial
1091 if (GET_MODE (op
) != VOIDmode
)
1093 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1098 /* If we know that the value is already truncated, we can
1099 replace the TRUNCATE with a SUBREG. */
1100 if (GET_MODE_NUNITS (mode
) == 1
1101 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1102 || truncated_to_mode (mode
, op
)))
1104 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1109 /* A truncate of a comparison can be replaced with a subreg if
1110 STORE_FLAG_VALUE permits. This is like the previous test,
1111 but it works even if the comparison is done in a mode larger
1112 than HOST_BITS_PER_WIDE_INT. */
1113 if (HWI_COMPUTABLE_MODE_P (mode
)
1114 && COMPARISON_P (op
)
1115 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1117 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1122 /* A truncate of a memory is just loading the low part of the memory
1123 if we are not changing the meaning of the address. */
1124 if (GET_CODE (op
) == MEM
1125 && !VECTOR_MODE_P (mode
)
1126 && !MEM_VOLATILE_P (op
)
1127 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1129 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1136 case FLOAT_TRUNCATE
:
1137 if (DECIMAL_FLOAT_MODE_P (mode
))
1140 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1141 if (GET_CODE (op
) == FLOAT_EXTEND
1142 && GET_MODE (XEXP (op
, 0)) == mode
)
1143 return XEXP (op
, 0);
1145 /* (float_truncate:SF (float_truncate:DF foo:XF))
1146 = (float_truncate:SF foo:XF).
1147 This may eliminate double rounding, so it is unsafe.
1149 (float_truncate:SF (float_extend:XF foo:DF))
1150 = (float_truncate:SF foo:DF).
1152 (float_truncate:DF (float_extend:XF foo:SF))
1153 = (float_extend:SF foo:DF). */
1154 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1155 && flag_unsafe_math_optimizations
)
1156 || GET_CODE (op
) == FLOAT_EXTEND
)
1157 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
1159 > GET_MODE_SIZE (mode
)
1160 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1162 XEXP (op
, 0), mode
);
1164 /* (float_truncate (float x)) is (float x) */
1165 if (GET_CODE (op
) == FLOAT
1166 && (flag_unsafe_math_optimizations
1167 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1168 && ((unsigned)significand_size (GET_MODE (op
))
1169 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1170 - num_sign_bit_copies (XEXP (op
, 0),
1171 GET_MODE (XEXP (op
, 0))))))))
1172 return simplify_gen_unary (FLOAT
, mode
,
1174 GET_MODE (XEXP (op
, 0)));
1176 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1177 (OP:SF foo:SF) if OP is NEG or ABS. */
1178 if ((GET_CODE (op
) == ABS
1179 || GET_CODE (op
) == NEG
)
1180 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1181 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1182 return simplify_gen_unary (GET_CODE (op
), mode
,
1183 XEXP (XEXP (op
, 0), 0), mode
);
1185 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1186 is (float_truncate:SF x). */
1187 if (GET_CODE (op
) == SUBREG
1188 && subreg_lowpart_p (op
)
1189 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1190 return SUBREG_REG (op
);
1194 if (DECIMAL_FLOAT_MODE_P (mode
))
1197 /* (float_extend (float_extend x)) is (float_extend x)
1199 (float_extend (float x)) is (float x) assuming that double
1200 rounding can't happen.
1202 if (GET_CODE (op
) == FLOAT_EXTEND
1203 || (GET_CODE (op
) == FLOAT
1204 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1205 && ((unsigned)significand_size (GET_MODE (op
))
1206 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1207 - num_sign_bit_copies (XEXP (op
, 0),
1208 GET_MODE (XEXP (op
, 0)))))))
1209 return simplify_gen_unary (GET_CODE (op
), mode
,
1211 GET_MODE (XEXP (op
, 0)));
1216 /* (abs (neg <foo>)) -> (abs <foo>) */
1217 if (GET_CODE (op
) == NEG
)
1218 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1219 GET_MODE (XEXP (op
, 0)));
1221 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1223 if (GET_MODE (op
) == VOIDmode
)
1226 /* If operand is something known to be positive, ignore the ABS. */
1227 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1228 || val_signbit_known_clear_p (GET_MODE (op
),
1229 nonzero_bits (op
, GET_MODE (op
))))
1232 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1233 if (num_sign_bit_copies (op
, mode
) == GET_MODE_PRECISION (mode
))
1234 return gen_rtx_NEG (mode
, op
);
1239 /* (ffs (*_extend <X>)) = (ffs <X>) */
1240 if (GET_CODE (op
) == SIGN_EXTEND
1241 || GET_CODE (op
) == ZERO_EXTEND
)
1242 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1243 GET_MODE (XEXP (op
, 0)));
1247 switch (GET_CODE (op
))
1251 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1252 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1253 GET_MODE (XEXP (op
, 0)));
1257 /* Rotations don't affect popcount. */
1258 if (!side_effects_p (XEXP (op
, 1)))
1259 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1260 GET_MODE (XEXP (op
, 0)));
1269 switch (GET_CODE (op
))
1275 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1276 GET_MODE (XEXP (op
, 0)));
1280 /* Rotations don't affect parity. */
1281 if (!side_effects_p (XEXP (op
, 1)))
1282 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1283 GET_MODE (XEXP (op
, 0)));
1292 /* (bswap (bswap x)) -> x. */
1293 if (GET_CODE (op
) == BSWAP
)
1294 return XEXP (op
, 0);
1298 /* (float (sign_extend <X>)) = (float <X>). */
1299 if (GET_CODE (op
) == SIGN_EXTEND
)
1300 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1301 GET_MODE (XEXP (op
, 0)));
1305 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1306 becomes just the MINUS if its mode is MODE. This allows
1307 folding switch statements on machines using casesi (such as
1309 if (GET_CODE (op
) == TRUNCATE
1310 && GET_MODE (XEXP (op
, 0)) == mode
1311 && GET_CODE (XEXP (op
, 0)) == MINUS
1312 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1313 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1314 return XEXP (op
, 0);
1316 /* Extending a widening multiplication should be canonicalized to
1317 a wider widening multiplication. */
1318 if (GET_CODE (op
) == MULT
)
1320 rtx lhs
= XEXP (op
, 0);
1321 rtx rhs
= XEXP (op
, 1);
1322 enum rtx_code lcode
= GET_CODE (lhs
);
1323 enum rtx_code rcode
= GET_CODE (rhs
);
1325 /* Widening multiplies usually extend both operands, but sometimes
1326 they use a shift to extract a portion of a register. */
1327 if ((lcode
== SIGN_EXTEND
1328 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1329 && (rcode
== SIGN_EXTEND
1330 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1332 machine_mode lmode
= GET_MODE (lhs
);
1333 machine_mode rmode
= GET_MODE (rhs
);
1336 if (lcode
== ASHIFTRT
)
1337 /* Number of bits not shifted off the end. */
1338 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1339 else /* lcode == SIGN_EXTEND */
1340 /* Size of inner mode. */
1341 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1343 if (rcode
== ASHIFTRT
)
1344 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1345 else /* rcode == SIGN_EXTEND */
1346 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1348 /* We can only widen multiplies if the result is mathematiclly
1349 equivalent. I.e. if overflow was impossible. */
1350 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1351 return simplify_gen_binary
1353 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1354 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1358 /* Check for a sign extension of a subreg of a promoted
1359 variable, where the promotion is sign-extended, and the
1360 target mode is the same as the variable's promotion. */
1361 if (GET_CODE (op
) == SUBREG
1362 && SUBREG_PROMOTED_VAR_P (op
)
1363 && SUBREG_PROMOTED_SIGNED_P (op
)
1364 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1366 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1371 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1372 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1373 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1375 gcc_assert (GET_MODE_PRECISION (mode
)
1376 > GET_MODE_PRECISION (GET_MODE (op
)));
1377 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1378 GET_MODE (XEXP (op
, 0)));
1381 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1382 is (sign_extend:M (subreg:O <X>)) if there is mode with
1383 GET_MODE_BITSIZE (N) - I bits.
1384 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1385 is similarly (zero_extend:M (subreg:O <X>)). */
1386 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1387 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1388 && CONST_INT_P (XEXP (op
, 1))
1389 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1390 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1393 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1394 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1395 gcc_assert (GET_MODE_BITSIZE (mode
)
1396 > GET_MODE_BITSIZE (GET_MODE (op
)));
1397 if (tmode
!= BLKmode
)
1400 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1402 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1403 ? SIGN_EXTEND
: ZERO_EXTEND
,
1404 mode
, inner
, tmode
);
1408 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1409 /* As we do not know which address space the pointer is referring to,
1410 we can do this only if the target does not support different pointer
1411 or address modes depending on the address space. */
1412 if (target_default_pointer_address_modes_p ()
1413 && ! POINTERS_EXTEND_UNSIGNED
1414 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1416 || (GET_CODE (op
) == SUBREG
1417 && REG_P (SUBREG_REG (op
))
1418 && REG_POINTER (SUBREG_REG (op
))
1419 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1420 return convert_memory_address (Pmode
, op
);
1425 /* Check for a zero extension of a subreg of a promoted
1426 variable, where the promotion is zero-extended, and the
1427 target mode is the same as the variable's promotion. */
1428 if (GET_CODE (op
) == SUBREG
1429 && SUBREG_PROMOTED_VAR_P (op
)
1430 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1431 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1433 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1438 /* Extending a widening multiplication should be canonicalized to
1439 a wider widening multiplication. */
1440 if (GET_CODE (op
) == MULT
)
1442 rtx lhs
= XEXP (op
, 0);
1443 rtx rhs
= XEXP (op
, 1);
1444 enum rtx_code lcode
= GET_CODE (lhs
);
1445 enum rtx_code rcode
= GET_CODE (rhs
);
1447 /* Widening multiplies usually extend both operands, but sometimes
1448 they use a shift to extract a portion of a register. */
1449 if ((lcode
== ZERO_EXTEND
1450 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1451 && (rcode
== ZERO_EXTEND
1452 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1454 machine_mode lmode
= GET_MODE (lhs
);
1455 machine_mode rmode
= GET_MODE (rhs
);
1458 if (lcode
== LSHIFTRT
)
1459 /* Number of bits not shifted off the end. */
1460 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1461 else /* lcode == ZERO_EXTEND */
1462 /* Size of inner mode. */
1463 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1465 if (rcode
== LSHIFTRT
)
1466 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1467 else /* rcode == ZERO_EXTEND */
1468 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1470 /* We can only widen multiplies if the result is mathematiclly
1471 equivalent. I.e. if overflow was impossible. */
1472 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1473 return simplify_gen_binary
1475 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1476 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1480 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1481 if (GET_CODE (op
) == ZERO_EXTEND
)
1482 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1483 GET_MODE (XEXP (op
, 0)));
1485 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1486 is (zero_extend:M (subreg:O <X>)) if there is mode with
1487 GET_MODE_PRECISION (N) - I bits. */
1488 if (GET_CODE (op
) == LSHIFTRT
1489 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1490 && CONST_INT_P (XEXP (op
, 1))
1491 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1492 && GET_MODE_PRECISION (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1495 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op
))
1496 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1497 if (tmode
!= BLKmode
)
1500 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1502 return simplify_gen_unary (ZERO_EXTEND
, mode
, inner
, tmode
);
1506 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1507 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1509 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1510 (and:SI (reg:SI) (const_int 63)). */
1511 if (GET_CODE (op
) == SUBREG
1512 && GET_MODE_PRECISION (GET_MODE (op
))
1513 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1514 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1515 <= HOST_BITS_PER_WIDE_INT
1516 && GET_MODE_PRECISION (mode
)
1517 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1518 && subreg_lowpart_p (op
)
1519 && (nonzero_bits (SUBREG_REG (op
), GET_MODE (SUBREG_REG (op
)))
1520 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1522 if (GET_MODE_PRECISION (mode
)
1523 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
))))
1524 return SUBREG_REG (op
);
1525 return simplify_gen_unary (ZERO_EXTEND
, mode
, SUBREG_REG (op
),
1526 GET_MODE (SUBREG_REG (op
)));
1529 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1530 /* As we do not know which address space the pointer is referring to,
1531 we can do this only if the target does not support different pointer
1532 or address modes depending on the address space. */
1533 if (target_default_pointer_address_modes_p ()
1534 && POINTERS_EXTEND_UNSIGNED
> 0
1535 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1537 || (GET_CODE (op
) == SUBREG
1538 && REG_P (SUBREG_REG (op
))
1539 && REG_POINTER (SUBREG_REG (op
))
1540 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1541 return convert_memory_address (Pmode
, op
);
1552 /* Try to compute the value of a unary operation CODE whose output mode is to
1553 be MODE with input operand OP whose mode was originally OP_MODE.
1554 Return zero if the value cannot be computed. */
1556 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1557 rtx op
, machine_mode op_mode
)
1559 unsigned int width
= GET_MODE_PRECISION (mode
);
1561 if (code
== VEC_DUPLICATE
)
1563 gcc_assert (VECTOR_MODE_P (mode
));
1564 if (GET_MODE (op
) != VOIDmode
)
1566 if (!VECTOR_MODE_P (GET_MODE (op
)))
1567 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1569 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1572 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
)
1573 || GET_CODE (op
) == CONST_VECTOR
)
1575 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1576 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1577 rtvec v
= rtvec_alloc (n_elts
);
1580 if (GET_CODE (op
) != CONST_VECTOR
)
1581 for (i
= 0; i
< n_elts
; i
++)
1582 RTVEC_ELT (v
, i
) = op
;
1585 machine_mode inmode
= GET_MODE (op
);
1586 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
1587 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1589 gcc_assert (in_n_elts
< n_elts
);
1590 gcc_assert ((n_elts
% in_n_elts
) == 0);
1591 for (i
= 0; i
< n_elts
; i
++)
1592 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1594 return gen_rtx_CONST_VECTOR (mode
, v
);
1598 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1600 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1601 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1602 machine_mode opmode
= GET_MODE (op
);
1603 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
1604 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1605 rtvec v
= rtvec_alloc (n_elts
);
1608 gcc_assert (op_n_elts
== n_elts
);
1609 for (i
= 0; i
< n_elts
; i
++)
1611 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1612 CONST_VECTOR_ELT (op
, i
),
1613 GET_MODE_INNER (opmode
));
1616 RTVEC_ELT (v
, i
) = x
;
1618 return gen_rtx_CONST_VECTOR (mode
, v
);
1621 /* The order of these tests is critical so that, for example, we don't
1622 check the wrong mode (input vs. output) for a conversion operation,
1623 such as FIX. At some point, this should be simplified. */
1625 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1629 if (op_mode
== VOIDmode
)
1631 /* CONST_INT have VOIDmode as the mode. We assume that all
1632 the bits of the constant are significant, though, this is
1633 a dangerous assumption as many times CONST_INTs are
1634 created and used with garbage in the bits outside of the
1635 precision of the implied mode of the const_int. */
1636 op_mode
= MAX_MODE_INT
;
1639 real_from_integer (&d
, mode
, std::make_pair (op
, op_mode
), SIGNED
);
1640 d
= real_value_truncate (mode
, d
);
1641 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1643 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1647 if (op_mode
== VOIDmode
)
1649 /* CONST_INT have VOIDmode as the mode. We assume that all
1650 the bits of the constant are significant, though, this is
1651 a dangerous assumption as many times CONST_INTs are
1652 created and used with garbage in the bits outside of the
1653 precision of the implied mode of the const_int. */
1654 op_mode
= MAX_MODE_INT
;
1657 real_from_integer (&d
, mode
, std::make_pair (op
, op_mode
), UNSIGNED
);
1658 d
= real_value_truncate (mode
, d
);
1659 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1662 if (CONST_SCALAR_INT_P (op
) && width
> 0)
1665 machine_mode imode
= op_mode
== VOIDmode
? mode
: op_mode
;
1666 rtx_mode_t op0
= std::make_pair (op
, imode
);
1669 #if TARGET_SUPPORTS_WIDE_INT == 0
1670 /* This assert keeps the simplification from producing a result
1671 that cannot be represented in a CONST_DOUBLE but a lot of
1672 upstream callers expect that this function never fails to
1673 simplify something and so you if you added this to the test
1674 above the code would die later anyway. If this assert
1675 happens, you just need to make the port support wide int. */
1676 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1682 result
= wi::bit_not (op0
);
1686 result
= wi::neg (op0
);
1690 result
= wi::abs (op0
);
1694 result
= wi::shwi (wi::ffs (op0
), mode
);
1698 if (wi::ne_p (op0
, 0))
1699 int_value
= wi::clz (op0
);
1700 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, int_value
))
1701 int_value
= GET_MODE_PRECISION (mode
);
1702 result
= wi::shwi (int_value
, mode
);
1706 result
= wi::shwi (wi::clrsb (op0
), mode
);
1710 if (wi::ne_p (op0
, 0))
1711 int_value
= wi::ctz (op0
);
1712 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, int_value
))
1713 int_value
= GET_MODE_PRECISION (mode
);
1714 result
= wi::shwi (int_value
, mode
);
1718 result
= wi::shwi (wi::popcount (op0
), mode
);
1722 result
= wi::shwi (wi::parity (op0
), mode
);
1726 result
= wide_int (op0
).bswap ();
1731 result
= wide_int::from (op0
, width
, UNSIGNED
);
1735 result
= wide_int::from (op0
, width
, SIGNED
);
1743 return immed_wide_int_const (result
, mode
);
1746 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1747 && SCALAR_FLOAT_MODE_P (mode
)
1748 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1751 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1758 d
= real_value_abs (&d
);
1761 d
= real_value_negate (&d
);
1763 case FLOAT_TRUNCATE
:
1764 d
= real_value_truncate (mode
, d
);
1767 /* All this does is change the mode, unless changing
1769 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1770 real_convert (&d
, mode
, &d
);
1773 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1780 real_to_target (tmp
, &d
, GET_MODE (op
));
1781 for (i
= 0; i
< 4; i
++)
1783 real_from_target (&d
, tmp
, mode
);
1789 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1791 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1792 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1793 && GET_MODE_CLASS (mode
) == MODE_INT
1796 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1797 operators are intentionally left unspecified (to ease implementation
1798 by target backends), for consistency, this routine implements the
1799 same semantics for constant folding as used by the middle-end. */
1801 /* This was formerly used only for non-IEEE float.
1802 eggert@twinsun.com says it is safe for IEEE also. */
1803 REAL_VALUE_TYPE x
, t
;
1804 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1805 wide_int wmax
, wmin
;
1806 /* This is part of the abi to real_to_integer, but we check
1807 things before making this call. */
1813 if (REAL_VALUE_ISNAN (x
))
1816 /* Test against the signed upper bound. */
1817 wmax
= wi::max_value (width
, SIGNED
);
1818 real_from_integer (&t
, VOIDmode
, wmax
, SIGNED
);
1819 if (REAL_VALUES_LESS (t
, x
))
1820 return immed_wide_int_const (wmax
, mode
);
1822 /* Test against the signed lower bound. */
1823 wmin
= wi::min_value (width
, SIGNED
);
1824 real_from_integer (&t
, VOIDmode
, wmin
, SIGNED
);
1825 if (REAL_VALUES_LESS (x
, t
))
1826 return immed_wide_int_const (wmin
, mode
);
1828 return immed_wide_int_const (real_to_integer (&x
, &fail
, width
), mode
);
1832 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1835 /* Test against the unsigned upper bound. */
1836 wmax
= wi::max_value (width
, UNSIGNED
);
1837 real_from_integer (&t
, VOIDmode
, wmax
, UNSIGNED
);
1838 if (REAL_VALUES_LESS (t
, x
))
1839 return immed_wide_int_const (wmax
, mode
);
1841 return immed_wide_int_const (real_to_integer (&x
, &fail
, width
),
1853 /* Subroutine of simplify_binary_operation to simplify a binary operation
1854 CODE that can commute with byte swapping, with result mode MODE and
1855 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1856 Return zero if no simplification or canonicalization is possible. */
1859 simplify_byte_swapping_operation (enum rtx_code code
, machine_mode mode
,
1864 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
1865 if (GET_CODE (op0
) == BSWAP
&& CONST_SCALAR_INT_P (op1
))
1867 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0),
1868 simplify_gen_unary (BSWAP
, mode
, op1
, mode
));
1869 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
1872 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1873 if (GET_CODE (op0
) == BSWAP
&& GET_CODE (op1
) == BSWAP
)
1875 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1876 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
1882 /* Subroutine of simplify_binary_operation to simplify a commutative,
1883 associative binary operation CODE with result mode MODE, operating
1884 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1885 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1886 canonicalization is possible. */
1889 simplify_associative_operation (enum rtx_code code
, machine_mode mode
,
1894 /* Linearize the operator to the left. */
1895 if (GET_CODE (op1
) == code
)
1897 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1898 if (GET_CODE (op0
) == code
)
1900 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
1901 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
1904 /* "a op (b op c)" becomes "(b op c) op a". */
1905 if (! swap_commutative_operands_p (op1
, op0
))
1906 return simplify_gen_binary (code
, mode
, op1
, op0
);
1913 if (GET_CODE (op0
) == code
)
1915 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1916 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
1918 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
1919 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1922 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1923 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
1925 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
1927 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1928 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
1930 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1937 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1938 and OP1. Return 0 if no simplification is possible.
1940 Don't use this for relational operations such as EQ or LT.
1941 Use simplify_relational_operation instead. */
1943 simplify_binary_operation (enum rtx_code code
, machine_mode mode
,
1946 rtx trueop0
, trueop1
;
1949 /* Relational operations don't work here. We must know the mode
1950 of the operands in order to do the comparison correctly.
1951 Assuming a full word can give incorrect results.
1952 Consider comparing 128 with -128 in QImode. */
1953 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
1954 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
1956 /* Make sure the constant is second. */
1957 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
1958 && swap_commutative_operands_p (op0
, op1
))
1960 tem
= op0
, op0
= op1
, op1
= tem
;
1963 trueop0
= avoid_constant_pool_reference (op0
);
1964 trueop1
= avoid_constant_pool_reference (op1
);
1966 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
1969 return simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
1972 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1973 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1974 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1975 actual constants. */
1978 simplify_binary_operation_1 (enum rtx_code code
, machine_mode mode
,
1979 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
1981 rtx tem
, reversed
, opleft
, opright
;
1983 unsigned int width
= GET_MODE_PRECISION (mode
);
1985 /* Even if we can't compute a constant result,
1986 there are some cases worth simplifying. */
1991 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1992 when x is NaN, infinite, or finite and nonzero. They aren't
1993 when x is -0 and the rounding mode is not towards -infinity,
1994 since (-0) + 0 is then 0. */
1995 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1998 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1999 transformations are safe even for IEEE. */
2000 if (GET_CODE (op0
) == NEG
)
2001 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
2002 else if (GET_CODE (op1
) == NEG
)
2003 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
2005 /* (~a) + 1 -> -a */
2006 if (INTEGRAL_MODE_P (mode
)
2007 && GET_CODE (op0
) == NOT
2008 && trueop1
== const1_rtx
)
2009 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
2011 /* Handle both-operands-constant cases. We can only add
2012 CONST_INTs to constants since the sum of relocatable symbols
2013 can't be handled by most assemblers. Don't add CONST_INT
2014 to CONST_INT since overflow won't be computed properly if wider
2015 than HOST_BITS_PER_WIDE_INT. */
2017 if ((GET_CODE (op0
) == CONST
2018 || GET_CODE (op0
) == SYMBOL_REF
2019 || GET_CODE (op0
) == LABEL_REF
)
2020 && CONST_INT_P (op1
))
2021 return plus_constant (mode
, op0
, INTVAL (op1
));
2022 else if ((GET_CODE (op1
) == CONST
2023 || GET_CODE (op1
) == SYMBOL_REF
2024 || GET_CODE (op1
) == LABEL_REF
)
2025 && CONST_INT_P (op0
))
2026 return plus_constant (mode
, op1
, INTVAL (op0
));
2028 /* See if this is something like X * C - X or vice versa or
2029 if the multiplication is written as a shift. If so, we can
2030 distribute and make a new multiply, shift, or maybe just
2031 have X (if C is 2 in the example above). But don't make
2032 something more expensive than we had before. */
2034 if (SCALAR_INT_MODE_P (mode
))
2036 rtx lhs
= op0
, rhs
= op1
;
2038 wide_int coeff0
= wi::one (GET_MODE_PRECISION (mode
));
2039 wide_int coeff1
= wi::one (GET_MODE_PRECISION (mode
));
2041 if (GET_CODE (lhs
) == NEG
)
2043 coeff0
= wi::minus_one (GET_MODE_PRECISION (mode
));
2044 lhs
= XEXP (lhs
, 0);
2046 else if (GET_CODE (lhs
) == MULT
2047 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2049 coeff0
= std::make_pair (XEXP (lhs
, 1), mode
);
2050 lhs
= XEXP (lhs
, 0);
2052 else if (GET_CODE (lhs
) == ASHIFT
2053 && CONST_INT_P (XEXP (lhs
, 1))
2054 && INTVAL (XEXP (lhs
, 1)) >= 0
2055 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (mode
))
2057 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2058 GET_MODE_PRECISION (mode
));
2059 lhs
= XEXP (lhs
, 0);
2062 if (GET_CODE (rhs
) == NEG
)
2064 coeff1
= wi::minus_one (GET_MODE_PRECISION (mode
));
2065 rhs
= XEXP (rhs
, 0);
2067 else if (GET_CODE (rhs
) == MULT
2068 && CONST_INT_P (XEXP (rhs
, 1)))
2070 coeff1
= std::make_pair (XEXP (rhs
, 1), mode
);
2071 rhs
= XEXP (rhs
, 0);
2073 else if (GET_CODE (rhs
) == ASHIFT
2074 && CONST_INT_P (XEXP (rhs
, 1))
2075 && INTVAL (XEXP (rhs
, 1)) >= 0
2076 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (mode
))
2078 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2079 GET_MODE_PRECISION (mode
));
2080 rhs
= XEXP (rhs
, 0);
2083 if (rtx_equal_p (lhs
, rhs
))
2085 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
2087 bool speed
= optimize_function_for_speed_p (cfun
);
2089 coeff
= immed_wide_int_const (coeff0
+ coeff1
, mode
);
2091 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2092 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2097 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2098 if (CONST_SCALAR_INT_P (op1
)
2099 && GET_CODE (op0
) == XOR
2100 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2101 && mode_signbit_p (mode
, op1
))
2102 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2103 simplify_gen_binary (XOR
, mode
, op1
,
2106 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2107 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2108 && GET_CODE (op0
) == MULT
2109 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2113 in1
= XEXP (XEXP (op0
, 0), 0);
2114 in2
= XEXP (op0
, 1);
2115 return simplify_gen_binary (MINUS
, mode
, op1
,
2116 simplify_gen_binary (MULT
, mode
,
2120 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2121 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2123 if (COMPARISON_P (op0
)
2124 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2125 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2126 && (reversed
= reversed_comparison (op0
, mode
)))
2128 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2130 /* If one of the operands is a PLUS or a MINUS, see if we can
2131 simplify this by the associative law.
2132 Don't use the associative law for floating point.
2133 The inaccuracy makes it nonassociative,
2134 and subtle programs can break if operations are associated. */
2136 if (INTEGRAL_MODE_P (mode
)
2137 && (plus_minus_operand_p (op0
)
2138 || plus_minus_operand_p (op1
))
2139 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2142 /* Reassociate floating point addition only when the user
2143 specifies associative math operations. */
2144 if (FLOAT_MODE_P (mode
)
2145 && flag_associative_math
)
2147 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2154 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2155 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2156 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2157 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2159 rtx xop00
= XEXP (op0
, 0);
2160 rtx xop10
= XEXP (op1
, 0);
2163 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2165 if (REG_P (xop00
) && REG_P (xop10
)
2166 && GET_MODE (xop00
) == GET_MODE (xop10
)
2167 && REGNO (xop00
) == REGNO (xop10
)
2168 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2169 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
2176 /* We can't assume x-x is 0 even with non-IEEE floating point,
2177 but since it is zero except in very strange circumstances, we
2178 will treat it as zero with -ffinite-math-only. */
2179 if (rtx_equal_p (trueop0
, trueop1
)
2180 && ! side_effects_p (op0
)
2181 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2182 return CONST0_RTX (mode
);
2184 /* Change subtraction from zero into negation. (0 - x) is the
2185 same as -x when x is NaN, infinite, or finite and nonzero.
2186 But if the mode has signed zeros, and does not round towards
2187 -infinity, then 0 - 0 is 0, not -0. */
2188 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2189 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2191 /* (-1 - a) is ~a. */
2192 if (trueop0
== constm1_rtx
)
2193 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2195 /* Subtracting 0 has no effect unless the mode has signed zeros
2196 and supports rounding towards -infinity. In such a case,
2198 if (!(HONOR_SIGNED_ZEROS (mode
)
2199 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2200 && trueop1
== CONST0_RTX (mode
))
2203 /* See if this is something like X * C - X or vice versa or
2204 if the multiplication is written as a shift. If so, we can
2205 distribute and make a new multiply, shift, or maybe just
2206 have X (if C is 2 in the example above). But don't make
2207 something more expensive than we had before. */
2209 if (SCALAR_INT_MODE_P (mode
))
2211 rtx lhs
= op0
, rhs
= op1
;
2213 wide_int coeff0
= wi::one (GET_MODE_PRECISION (mode
));
2214 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (mode
));
2216 if (GET_CODE (lhs
) == NEG
)
2218 coeff0
= wi::minus_one (GET_MODE_PRECISION (mode
));
2219 lhs
= XEXP (lhs
, 0);
2221 else if (GET_CODE (lhs
) == MULT
2222 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2224 coeff0
= std::make_pair (XEXP (lhs
, 1), mode
);
2225 lhs
= XEXP (lhs
, 0);
2227 else if (GET_CODE (lhs
) == ASHIFT
2228 && CONST_INT_P (XEXP (lhs
, 1))
2229 && INTVAL (XEXP (lhs
, 1)) >= 0
2230 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (mode
))
2232 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2233 GET_MODE_PRECISION (mode
));
2234 lhs
= XEXP (lhs
, 0);
2237 if (GET_CODE (rhs
) == NEG
)
2239 negcoeff1
= wi::one (GET_MODE_PRECISION (mode
));
2240 rhs
= XEXP (rhs
, 0);
2242 else if (GET_CODE (rhs
) == MULT
2243 && CONST_INT_P (XEXP (rhs
, 1)))
2245 negcoeff1
= wi::neg (std::make_pair (XEXP (rhs
, 1), mode
));
2246 rhs
= XEXP (rhs
, 0);
2248 else if (GET_CODE (rhs
) == ASHIFT
2249 && CONST_INT_P (XEXP (rhs
, 1))
2250 && INTVAL (XEXP (rhs
, 1)) >= 0
2251 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (mode
))
2253 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2254 GET_MODE_PRECISION (mode
));
2255 negcoeff1
= -negcoeff1
;
2256 rhs
= XEXP (rhs
, 0);
2259 if (rtx_equal_p (lhs
, rhs
))
2261 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2263 bool speed
= optimize_function_for_speed_p (cfun
);
2265 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, mode
);
2267 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2268 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2273 /* (a - (-b)) -> (a + b). True even for IEEE. */
2274 if (GET_CODE (op1
) == NEG
)
2275 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2277 /* (-x - c) may be simplified as (-c - x). */
2278 if (GET_CODE (op0
) == NEG
2279 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2281 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2283 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2286 /* Don't let a relocatable value get a negative coeff. */
2287 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2288 return simplify_gen_binary (PLUS
, mode
,
2290 neg_const_int (mode
, op1
));
2292 /* (x - (x & y)) -> (x & ~y) */
2293 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2295 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2297 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2298 GET_MODE (XEXP (op1
, 1)));
2299 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2301 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2303 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2304 GET_MODE (XEXP (op1
, 0)));
2305 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2309 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2310 by reversing the comparison code if valid. */
2311 if (STORE_FLAG_VALUE
== 1
2312 && trueop0
== const1_rtx
2313 && COMPARISON_P (op1
)
2314 && (reversed
= reversed_comparison (op1
, mode
)))
2317 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2318 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2319 && GET_CODE (op1
) == MULT
2320 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2324 in1
= XEXP (XEXP (op1
, 0), 0);
2325 in2
= XEXP (op1
, 1);
2326 return simplify_gen_binary (PLUS
, mode
,
2327 simplify_gen_binary (MULT
, mode
,
2332 /* Canonicalize (minus (neg A) (mult B C)) to
2333 (minus (mult (neg B) C) A). */
2334 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2335 && GET_CODE (op1
) == MULT
2336 && GET_CODE (op0
) == NEG
)
2340 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2341 in2
= XEXP (op1
, 1);
2342 return simplify_gen_binary (MINUS
, mode
,
2343 simplify_gen_binary (MULT
, mode
,
2348 /* If one of the operands is a PLUS or a MINUS, see if we can
2349 simplify this by the associative law. This will, for example,
2350 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2351 Don't use the associative law for floating point.
2352 The inaccuracy makes it nonassociative,
2353 and subtle programs can break if operations are associated. */
2355 if (INTEGRAL_MODE_P (mode
)
2356 && (plus_minus_operand_p (op0
)
2357 || plus_minus_operand_p (op1
))
2358 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2363 if (trueop1
== constm1_rtx
)
2364 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2366 if (GET_CODE (op0
) == NEG
)
2368 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2369 /* If op1 is a MULT as well and simplify_unary_operation
2370 just moved the NEG to the second operand, simplify_gen_binary
2371 below could through simplify_associative_operation move
2372 the NEG around again and recurse endlessly. */
2374 && GET_CODE (op1
) == MULT
2375 && GET_CODE (temp
) == MULT
2376 && XEXP (op1
, 0) == XEXP (temp
, 0)
2377 && GET_CODE (XEXP (temp
, 1)) == NEG
2378 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2381 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2383 if (GET_CODE (op1
) == NEG
)
2385 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2386 /* If op0 is a MULT as well and simplify_unary_operation
2387 just moved the NEG to the second operand, simplify_gen_binary
2388 below could through simplify_associative_operation move
2389 the NEG around again and recurse endlessly. */
2391 && GET_CODE (op0
) == MULT
2392 && GET_CODE (temp
) == MULT
2393 && XEXP (op0
, 0) == XEXP (temp
, 0)
2394 && GET_CODE (XEXP (temp
, 1)) == NEG
2395 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2398 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2401 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2402 x is NaN, since x * 0 is then also NaN. Nor is it valid
2403 when the mode has signed zeros, since multiplying a negative
2404 number by 0 will give -0, not 0. */
2405 if (!HONOR_NANS (mode
)
2406 && !HONOR_SIGNED_ZEROS (mode
)
2407 && trueop1
== CONST0_RTX (mode
)
2408 && ! side_effects_p (op0
))
2411 /* In IEEE floating point, x*1 is not equivalent to x for
2413 if (!HONOR_SNANS (mode
)
2414 && trueop1
== CONST1_RTX (mode
))
2417 /* Convert multiply by constant power of two into shift. */
2418 if (CONST_SCALAR_INT_P (trueop1
))
2420 val
= wi::exact_log2 (std::make_pair (trueop1
, mode
));
2422 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2425 /* x*2 is x+x and x*(-1) is -x */
2426 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2427 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2428 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2429 && GET_MODE (op0
) == mode
)
2432 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2434 if (REAL_VALUES_EQUAL (d
, dconst2
))
2435 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2437 if (!HONOR_SNANS (mode
)
2438 && REAL_VALUES_EQUAL (d
, dconstm1
))
2439 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2442 /* Optimize -x * -x as x * x. */
2443 if (FLOAT_MODE_P (mode
)
2444 && GET_CODE (op0
) == NEG
2445 && GET_CODE (op1
) == NEG
2446 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2447 && !side_effects_p (XEXP (op0
, 0)))
2448 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2450 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2451 if (SCALAR_FLOAT_MODE_P (mode
)
2452 && GET_CODE (op0
) == ABS
2453 && GET_CODE (op1
) == ABS
2454 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2455 && !side_effects_p (XEXP (op0
, 0)))
2456 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2458 /* Reassociate multiplication, but for floating point MULTs
2459 only when the user specifies unsafe math optimizations. */
2460 if (! FLOAT_MODE_P (mode
)
2461 || flag_unsafe_math_optimizations
)
2463 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2470 if (trueop1
== CONST0_RTX (mode
))
2472 if (INTEGRAL_MODE_P (mode
)
2473 && trueop1
== CONSTM1_RTX (mode
)
2474 && !side_effects_p (op0
))
2476 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2478 /* A | (~A) -> -1 */
2479 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2480 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2481 && ! side_effects_p (op0
)
2482 && SCALAR_INT_MODE_P (mode
))
2485 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2486 if (CONST_INT_P (op1
)
2487 && HWI_COMPUTABLE_MODE_P (mode
)
2488 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2489 && !side_effects_p (op0
))
2492 /* Canonicalize (X & C1) | C2. */
2493 if (GET_CODE (op0
) == AND
2494 && CONST_INT_P (trueop1
)
2495 && CONST_INT_P (XEXP (op0
, 1)))
2497 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2498 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2499 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2501 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2503 && !side_effects_p (XEXP (op0
, 0)))
2506 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2507 if (((c1
|c2
) & mask
) == mask
)
2508 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2510 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2511 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2513 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2514 gen_int_mode (c1
& ~c2
, mode
));
2515 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2519 /* Convert (A & B) | A to A. */
2520 if (GET_CODE (op0
) == AND
2521 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2522 || rtx_equal_p (XEXP (op0
, 1), op1
))
2523 && ! side_effects_p (XEXP (op0
, 0))
2524 && ! side_effects_p (XEXP (op0
, 1)))
2527 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2528 mode size to (rotate A CX). */
2530 if (GET_CODE (op1
) == ASHIFT
2531 || GET_CODE (op1
) == SUBREG
)
2542 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2543 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2544 && CONST_INT_P (XEXP (opleft
, 1))
2545 && CONST_INT_P (XEXP (opright
, 1))
2546 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2547 == GET_MODE_PRECISION (mode
)))
2548 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2550 /* Same, but for ashift that has been "simplified" to a wider mode
2551 by simplify_shift_const. */
2553 if (GET_CODE (opleft
) == SUBREG
2554 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2555 && GET_CODE (opright
) == LSHIFTRT
2556 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2557 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2558 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2559 && (GET_MODE_SIZE (GET_MODE (opleft
))
2560 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2561 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2562 SUBREG_REG (XEXP (opright
, 0)))
2563 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2564 && CONST_INT_P (XEXP (opright
, 1))
2565 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2566 == GET_MODE_PRECISION (mode
)))
2567 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2568 XEXP (SUBREG_REG (opleft
), 1));
2570 /* If we have (ior (and (X C1) C2)), simplify this by making
2571 C1 as small as possible if C1 actually changes. */
2572 if (CONST_INT_P (op1
)
2573 && (HWI_COMPUTABLE_MODE_P (mode
)
2574 || INTVAL (op1
) > 0)
2575 && GET_CODE (op0
) == AND
2576 && CONST_INT_P (XEXP (op0
, 1))
2577 && CONST_INT_P (op1
)
2578 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2580 rtx tmp
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2581 gen_int_mode (UINTVAL (XEXP (op0
, 1))
2584 return simplify_gen_binary (IOR
, mode
, tmp
, op1
);
2587 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2588 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2589 the PLUS does not affect any of the bits in OP1: then we can do
2590 the IOR as a PLUS and we can associate. This is valid if OP1
2591 can be safely shifted left C bits. */
2592 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2593 && GET_CODE (XEXP (op0
, 0)) == PLUS
2594 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2595 && CONST_INT_P (XEXP (op0
, 1))
2596 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2598 int count
= INTVAL (XEXP (op0
, 1));
2599 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2601 if (mask
>> count
== INTVAL (trueop1
)
2602 && trunc_int_for_mode (mask
, mode
) == mask
2603 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2604 return simplify_gen_binary (ASHIFTRT
, mode
,
2605 plus_constant (mode
, XEXP (op0
, 0),
2610 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2614 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2620 if (trueop1
== CONST0_RTX (mode
))
2622 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2623 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2624 if (rtx_equal_p (trueop0
, trueop1
)
2625 && ! side_effects_p (op0
)
2626 && GET_MODE_CLASS (mode
) != MODE_CC
)
2627 return CONST0_RTX (mode
);
2629 /* Canonicalize XOR of the most significant bit to PLUS. */
2630 if (CONST_SCALAR_INT_P (op1
)
2631 && mode_signbit_p (mode
, op1
))
2632 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2633 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2634 if (CONST_SCALAR_INT_P (op1
)
2635 && GET_CODE (op0
) == PLUS
2636 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2637 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2638 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2639 simplify_gen_binary (XOR
, mode
, op1
,
2642 /* If we are XORing two things that have no bits in common,
2643 convert them into an IOR. This helps to detect rotation encoded
2644 using those methods and possibly other simplifications. */
2646 if (HWI_COMPUTABLE_MODE_P (mode
)
2647 && (nonzero_bits (op0
, mode
)
2648 & nonzero_bits (op1
, mode
)) == 0)
2649 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2651 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2652 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2655 int num_negated
= 0;
2657 if (GET_CODE (op0
) == NOT
)
2658 num_negated
++, op0
= XEXP (op0
, 0);
2659 if (GET_CODE (op1
) == NOT
)
2660 num_negated
++, op1
= XEXP (op1
, 0);
2662 if (num_negated
== 2)
2663 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2664 else if (num_negated
== 1)
2665 return simplify_gen_unary (NOT
, mode
,
2666 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2670 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2671 correspond to a machine insn or result in further simplifications
2672 if B is a constant. */
2674 if (GET_CODE (op0
) == AND
2675 && rtx_equal_p (XEXP (op0
, 1), op1
)
2676 && ! side_effects_p (op1
))
2677 return simplify_gen_binary (AND
, mode
,
2678 simplify_gen_unary (NOT
, mode
,
2679 XEXP (op0
, 0), mode
),
2682 else if (GET_CODE (op0
) == AND
2683 && rtx_equal_p (XEXP (op0
, 0), op1
)
2684 && ! side_effects_p (op1
))
2685 return simplify_gen_binary (AND
, mode
,
2686 simplify_gen_unary (NOT
, mode
,
2687 XEXP (op0
, 1), mode
),
2690 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2691 we can transform like this:
2692 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2693 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2694 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2695 Attempt a few simplifications when B and C are both constants. */
2696 if (GET_CODE (op0
) == AND
2697 && CONST_INT_P (op1
)
2698 && CONST_INT_P (XEXP (op0
, 1)))
2700 rtx a
= XEXP (op0
, 0);
2701 rtx b
= XEXP (op0
, 1);
2703 HOST_WIDE_INT bval
= INTVAL (b
);
2704 HOST_WIDE_INT cval
= INTVAL (c
);
2707 = simplify_binary_operation (AND
, mode
,
2708 simplify_gen_unary (NOT
, mode
, a
, mode
),
2710 if ((~cval
& bval
) == 0)
2712 /* Try to simplify ~A&C | ~B&C. */
2713 if (na_c
!= NULL_RTX
)
2714 return simplify_gen_binary (IOR
, mode
, na_c
,
2715 gen_int_mode (~bval
& cval
, mode
));
2719 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2720 if (na_c
== const0_rtx
)
2722 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2723 gen_int_mode (~cval
& bval
,
2725 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2726 gen_int_mode (~bval
& cval
,
2732 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2733 comparison if STORE_FLAG_VALUE is 1. */
2734 if (STORE_FLAG_VALUE
== 1
2735 && trueop1
== const1_rtx
2736 && COMPARISON_P (op0
)
2737 && (reversed
= reversed_comparison (op0
, mode
)))
2740 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2741 is (lt foo (const_int 0)), so we can perform the above
2742 simplification if STORE_FLAG_VALUE is 1. */
2744 if (STORE_FLAG_VALUE
== 1
2745 && trueop1
== const1_rtx
2746 && GET_CODE (op0
) == LSHIFTRT
2747 && CONST_INT_P (XEXP (op0
, 1))
2748 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2749 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2751 /* (xor (comparison foo bar) (const_int sign-bit))
2752 when STORE_FLAG_VALUE is the sign bit. */
2753 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2754 && trueop1
== const_true_rtx
2755 && COMPARISON_P (op0
)
2756 && (reversed
= reversed_comparison (op0
, mode
)))
2759 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2763 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2769 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2771 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2773 if (HWI_COMPUTABLE_MODE_P (mode
))
2775 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2776 HOST_WIDE_INT nzop1
;
2777 if (CONST_INT_P (trueop1
))
2779 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2780 /* If we are turning off bits already known off in OP0, we need
2782 if ((nzop0
& ~val1
) == 0)
2785 nzop1
= nonzero_bits (trueop1
, mode
);
2786 /* If we are clearing all the nonzero bits, the result is zero. */
2787 if ((nzop1
& nzop0
) == 0
2788 && !side_effects_p (op0
) && !side_effects_p (op1
))
2789 return CONST0_RTX (mode
);
2791 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2792 && GET_MODE_CLASS (mode
) != MODE_CC
)
2795 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2796 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2797 && ! side_effects_p (op0
)
2798 && GET_MODE_CLASS (mode
) != MODE_CC
)
2799 return CONST0_RTX (mode
);
2801 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2802 there are no nonzero bits of C outside of X's mode. */
2803 if ((GET_CODE (op0
) == SIGN_EXTEND
2804 || GET_CODE (op0
) == ZERO_EXTEND
)
2805 && CONST_INT_P (trueop1
)
2806 && HWI_COMPUTABLE_MODE_P (mode
)
2807 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2808 & UINTVAL (trueop1
)) == 0)
2810 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2811 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2812 gen_int_mode (INTVAL (trueop1
),
2814 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2817 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2818 we might be able to further simplify the AND with X and potentially
2819 remove the truncation altogether. */
2820 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2822 rtx x
= XEXP (op0
, 0);
2823 machine_mode xmode
= GET_MODE (x
);
2824 tem
= simplify_gen_binary (AND
, xmode
, x
,
2825 gen_int_mode (INTVAL (trueop1
), xmode
));
2826 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2829 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2830 if (GET_CODE (op0
) == IOR
2831 && CONST_INT_P (trueop1
)
2832 && CONST_INT_P (XEXP (op0
, 1)))
2834 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2835 return simplify_gen_binary (IOR
, mode
,
2836 simplify_gen_binary (AND
, mode
,
2837 XEXP (op0
, 0), op1
),
2838 gen_int_mode (tmp
, mode
));
2841 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2842 insn (and may simplify more). */
2843 if (GET_CODE (op0
) == XOR
2844 && rtx_equal_p (XEXP (op0
, 0), op1
)
2845 && ! side_effects_p (op1
))
2846 return simplify_gen_binary (AND
, mode
,
2847 simplify_gen_unary (NOT
, mode
,
2848 XEXP (op0
, 1), mode
),
2851 if (GET_CODE (op0
) == XOR
2852 && rtx_equal_p (XEXP (op0
, 1), op1
)
2853 && ! side_effects_p (op1
))
2854 return simplify_gen_binary (AND
, mode
,
2855 simplify_gen_unary (NOT
, mode
,
2856 XEXP (op0
, 0), mode
),
2859 /* Similarly for (~(A ^ B)) & A. */
2860 if (GET_CODE (op0
) == NOT
2861 && GET_CODE (XEXP (op0
, 0)) == XOR
2862 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2863 && ! side_effects_p (op1
))
2864 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2866 if (GET_CODE (op0
) == NOT
2867 && GET_CODE (XEXP (op0
, 0)) == XOR
2868 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2869 && ! side_effects_p (op1
))
2870 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2872 /* Convert (A | B) & A to A. */
2873 if (GET_CODE (op0
) == IOR
2874 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2875 || rtx_equal_p (XEXP (op0
, 1), op1
))
2876 && ! side_effects_p (XEXP (op0
, 0))
2877 && ! side_effects_p (XEXP (op0
, 1)))
2880 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2881 ((A & N) + B) & M -> (A + B) & M
2882 Similarly if (N & M) == 0,
2883 ((A | N) + B) & M -> (A + B) & M
2884 and for - instead of + and/or ^ instead of |.
2885 Also, if (N & M) == 0, then
2886 (A +- N) & M -> A & M. */
2887 if (CONST_INT_P (trueop1
)
2888 && HWI_COMPUTABLE_MODE_P (mode
)
2889 && ~UINTVAL (trueop1
)
2890 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
2891 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2896 pmop
[0] = XEXP (op0
, 0);
2897 pmop
[1] = XEXP (op0
, 1);
2899 if (CONST_INT_P (pmop
[1])
2900 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
2901 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
2903 for (which
= 0; which
< 2; which
++)
2906 switch (GET_CODE (tem
))
2909 if (CONST_INT_P (XEXP (tem
, 1))
2910 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
2911 == UINTVAL (trueop1
))
2912 pmop
[which
] = XEXP (tem
, 0);
2916 if (CONST_INT_P (XEXP (tem
, 1))
2917 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
2918 pmop
[which
] = XEXP (tem
, 0);
2925 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
2927 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
2929 return simplify_gen_binary (code
, mode
, tem
, op1
);
2933 /* (and X (ior (not X) Y) -> (and X Y) */
2934 if (GET_CODE (op1
) == IOR
2935 && GET_CODE (XEXP (op1
, 0)) == NOT
2936 && op0
== XEXP (XEXP (op1
, 0), 0))
2937 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
2939 /* (and (ior (not X) Y) X) -> (and X Y) */
2940 if (GET_CODE (op0
) == IOR
2941 && GET_CODE (XEXP (op0
, 0)) == NOT
2942 && op1
== XEXP (XEXP (op0
, 0), 0))
2943 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
2945 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2949 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2955 /* 0/x is 0 (or x&0 if x has side-effects). */
2956 if (trueop0
== CONST0_RTX (mode
))
2958 if (side_effects_p (op1
))
2959 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2963 if (trueop1
== CONST1_RTX (mode
))
2965 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2969 /* Convert divide by power of two into shift. */
2970 if (CONST_INT_P (trueop1
)
2971 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
2972 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
2976 /* Handle floating point and integers separately. */
2977 if (SCALAR_FLOAT_MODE_P (mode
))
2979 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2980 safe for modes with NaNs, since 0.0 / 0.0 will then be
2981 NaN rather than 0.0. Nor is it safe for modes with signed
2982 zeros, since dividing 0 by a negative number gives -0.0 */
2983 if (trueop0
== CONST0_RTX (mode
)
2984 && !HONOR_NANS (mode
)
2985 && !HONOR_SIGNED_ZEROS (mode
)
2986 && ! side_effects_p (op1
))
2989 if (trueop1
== CONST1_RTX (mode
)
2990 && !HONOR_SNANS (mode
))
2993 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2994 && trueop1
!= CONST0_RTX (mode
))
2997 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
3000 if (REAL_VALUES_EQUAL (d
, dconstm1
)
3001 && !HONOR_SNANS (mode
))
3002 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3004 /* Change FP division by a constant into multiplication.
3005 Only do this with -freciprocal-math. */
3006 if (flag_reciprocal_math
3007 && !REAL_VALUES_EQUAL (d
, dconst0
))
3009 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
3010 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
3011 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3015 else if (SCALAR_INT_MODE_P (mode
))
3017 /* 0/x is 0 (or x&0 if x has side-effects). */
3018 if (trueop0
== CONST0_RTX (mode
)
3019 && !cfun
->can_throw_non_call_exceptions
)
3021 if (side_effects_p (op1
))
3022 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3026 if (trueop1
== CONST1_RTX (mode
))
3028 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3033 if (trueop1
== constm1_rtx
)
3035 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3037 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3043 /* 0%x is 0 (or x&0 if x has side-effects). */
3044 if (trueop0
== CONST0_RTX (mode
))
3046 if (side_effects_p (op1
))
3047 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3050 /* x%1 is 0 (of x&0 if x has side-effects). */
3051 if (trueop1
== CONST1_RTX (mode
))
3053 if (side_effects_p (op0
))
3054 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3055 return CONST0_RTX (mode
);
3057 /* Implement modulus by power of two as AND. */
3058 if (CONST_INT_P (trueop1
)
3059 && exact_log2 (UINTVAL (trueop1
)) > 0)
3060 return simplify_gen_binary (AND
, mode
, op0
,
3061 gen_int_mode (INTVAL (op1
) - 1, mode
));
3065 /* 0%x is 0 (or x&0 if x has side-effects). */
3066 if (trueop0
== CONST0_RTX (mode
))
3068 if (side_effects_p (op1
))
3069 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3072 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3073 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3075 if (side_effects_p (op0
))
3076 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3077 return CONST0_RTX (mode
);
3083 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3084 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3085 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3087 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3088 if (CONST_INT_P (trueop1
)
3089 && IN_RANGE (INTVAL (trueop1
),
3090 GET_MODE_PRECISION (mode
) / 2 + (code
== ROTATE
),
3091 GET_MODE_PRECISION (mode
) - 1))
3092 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3093 mode
, op0
, GEN_INT (GET_MODE_PRECISION (mode
)
3094 - INTVAL (trueop1
)));
3098 if (trueop1
== CONST0_RTX (mode
))
3100 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3102 /* Rotating ~0 always results in ~0. */
3103 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3104 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3105 && ! side_effects_p (op1
))
3109 scalar constants c1, c2
3110 size (M2) > size (M1)
3111 c1 == size (M2) - size (M1)
3113 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2)
3118 (subreg:M1 (ashiftrt:M2 (reg:M2)
3119 (const_int <c1 + c2>))
3121 if (!VECTOR_MODE_P (mode
)
3123 && CONST_INT_P (op1
)
3124 && (GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
)
3125 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0
)))
3126 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3127 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0
)))
3128 > GET_MODE_BITSIZE (mode
))
3129 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3130 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0
)))
3131 - GET_MODE_BITSIZE (mode
)))
3132 && subreg_lowpart_p (op0
))
3134 rtx tmp
= GEN_INT (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3136 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op0
));
3137 tmp
= simplify_gen_binary (ASHIFTRT
,
3138 GET_MODE (SUBREG_REG (op0
)),
3139 XEXP (SUBREG_REG (op0
), 0),
3141 return simplify_gen_subreg (mode
, tmp
, inner_mode
,
3142 subreg_lowpart_offset (mode
,
3146 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3148 val
= INTVAL (op1
) & (GET_MODE_PRECISION (mode
) - 1);
3149 if (val
!= INTVAL (op1
))
3150 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3157 if (trueop1
== CONST0_RTX (mode
))
3159 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3161 goto canonicalize_shift
;
3164 if (trueop1
== CONST0_RTX (mode
))
3166 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3168 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3169 if (GET_CODE (op0
) == CLZ
3170 && CONST_INT_P (trueop1
)
3171 && STORE_FLAG_VALUE
== 1
3172 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3174 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3175 unsigned HOST_WIDE_INT zero_val
= 0;
3177 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3178 && zero_val
== GET_MODE_PRECISION (imode
)
3179 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3180 return simplify_gen_relational (EQ
, mode
, imode
,
3181 XEXP (op0
, 0), const0_rtx
);
3183 goto canonicalize_shift
;
3186 if (width
<= HOST_BITS_PER_WIDE_INT
3187 && mode_signbit_p (mode
, trueop1
)
3188 && ! side_effects_p (op0
))
3190 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3192 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3198 if (width
<= HOST_BITS_PER_WIDE_INT
3199 && CONST_INT_P (trueop1
)
3200 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3201 && ! side_effects_p (op0
))
3203 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3205 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3211 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3213 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3215 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3221 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3223 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3225 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3238 /* ??? There are simplifications that can be done. */
3242 if (!VECTOR_MODE_P (mode
))
3244 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3245 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3246 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3247 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3248 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3250 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3251 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3254 /* Extract a scalar element from a nested VEC_SELECT expression
3255 (with optional nested VEC_CONCAT expression). Some targets
3256 (i386) extract scalar element from a vector using chain of
3257 nested VEC_SELECT expressions. When input operand is a memory
3258 operand, this operation can be simplified to a simple scalar
3259 load from an offseted memory address. */
3260 if (GET_CODE (trueop0
) == VEC_SELECT
)
3262 rtx op0
= XEXP (trueop0
, 0);
3263 rtx op1
= XEXP (trueop0
, 1);
3265 machine_mode opmode
= GET_MODE (op0
);
3266 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3267 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3269 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3275 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3276 gcc_assert (i
< n_elts
);
3278 /* Select element, pointed by nested selector. */
3279 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3281 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3282 if (GET_CODE (op0
) == VEC_CONCAT
)
3284 rtx op00
= XEXP (op0
, 0);
3285 rtx op01
= XEXP (op0
, 1);
3287 machine_mode mode00
, mode01
;
3288 int n_elts00
, n_elts01
;
3290 mode00
= GET_MODE (op00
);
3291 mode01
= GET_MODE (op01
);
3293 /* Find out number of elements of each operand. */
3294 if (VECTOR_MODE_P (mode00
))
3296 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3297 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3302 if (VECTOR_MODE_P (mode01
))
3304 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3305 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3310 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3312 /* Select correct operand of VEC_CONCAT
3313 and adjust selector. */
3314 if (elem
< n_elts01
)
3325 vec
= rtvec_alloc (1);
3326 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3328 tmp
= gen_rtx_fmt_ee (code
, mode
,
3329 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3332 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3333 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3334 return XEXP (trueop0
, 0);
3338 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3339 gcc_assert (GET_MODE_INNER (mode
)
3340 == GET_MODE_INNER (GET_MODE (trueop0
)));
3341 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3343 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3345 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3346 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3347 rtvec v
= rtvec_alloc (n_elts
);
3350 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3351 for (i
= 0; i
< n_elts
; i
++)
3353 rtx x
= XVECEXP (trueop1
, 0, i
);
3355 gcc_assert (CONST_INT_P (x
));
3356 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3360 return gen_rtx_CONST_VECTOR (mode
, v
);
3363 /* Recognize the identity. */
3364 if (GET_MODE (trueop0
) == mode
)
3366 bool maybe_ident
= true;
3367 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3369 rtx j
= XVECEXP (trueop1
, 0, i
);
3370 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3372 maybe_ident
= false;
3380 /* If we build {a,b} then permute it, build the result directly. */
3381 if (XVECLEN (trueop1
, 0) == 2
3382 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3383 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3384 && GET_CODE (trueop0
) == VEC_CONCAT
3385 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3386 && GET_MODE (XEXP (trueop0
, 0)) == mode
3387 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3388 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3390 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3391 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3394 gcc_assert (i0
< 4 && i1
< 4);
3395 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3396 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3398 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3401 if (XVECLEN (trueop1
, 0) == 2
3402 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3403 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3404 && GET_CODE (trueop0
) == VEC_CONCAT
3405 && GET_MODE (trueop0
) == mode
)
3407 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3408 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3411 gcc_assert (i0
< 2 && i1
< 2);
3412 subop0
= XEXP (trueop0
, i0
);
3413 subop1
= XEXP (trueop0
, i1
);
3415 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3418 /* If we select one half of a vec_concat, return that. */
3419 if (GET_CODE (trueop0
) == VEC_CONCAT
3420 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3422 rtx subop0
= XEXP (trueop0
, 0);
3423 rtx subop1
= XEXP (trueop0
, 1);
3424 machine_mode mode0
= GET_MODE (subop0
);
3425 machine_mode mode1
= GET_MODE (subop1
);
3426 int li
= GET_MODE_SIZE (GET_MODE_INNER (mode0
));
3427 int l0
= GET_MODE_SIZE (mode0
) / li
;
3428 int l1
= GET_MODE_SIZE (mode1
) / li
;
3429 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3430 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3432 bool success
= true;
3433 for (int i
= 1; i
< l0
; ++i
)
3435 rtx j
= XVECEXP (trueop1
, 0, i
);
3436 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3445 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3447 bool success
= true;
3448 for (int i
= 1; i
< l1
; ++i
)
3450 rtx j
= XVECEXP (trueop1
, 0, i
);
3451 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3463 if (XVECLEN (trueop1
, 0) == 1
3464 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3465 && GET_CODE (trueop0
) == VEC_CONCAT
)
3468 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3470 /* Try to find the element in the VEC_CONCAT. */
3471 while (GET_MODE (vec
) != mode
3472 && GET_CODE (vec
) == VEC_CONCAT
)
3474 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3475 if (offset
< vec_size
)
3476 vec
= XEXP (vec
, 0);
3480 vec
= XEXP (vec
, 1);
3482 vec
= avoid_constant_pool_reference (vec
);
3485 if (GET_MODE (vec
) == mode
)
3489 /* If we select elements in a vec_merge that all come from the same
3490 operand, select from that operand directly. */
3491 if (GET_CODE (op0
) == VEC_MERGE
)
3493 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3494 if (CONST_INT_P (trueop02
))
3496 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3497 bool all_operand0
= true;
3498 bool all_operand1
= true;
3499 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3501 rtx j
= XVECEXP (trueop1
, 0, i
);
3502 if (sel
& (1 << UINTVAL (j
)))
3503 all_operand1
= false;
3505 all_operand0
= false;
3507 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3508 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3509 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3510 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3514 /* If we have two nested selects that are inverses of each
3515 other, replace them with the source operand. */
3516 if (GET_CODE (trueop0
) == VEC_SELECT
3517 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3519 rtx op0_subop1
= XEXP (trueop0
, 1);
3520 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3521 gcc_assert (XVECLEN (trueop1
, 0) == GET_MODE_NUNITS (mode
));
3523 /* Apply the outer ordering vector to the inner one. (The inner
3524 ordering vector is expressly permitted to be of a different
3525 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3526 then the two VEC_SELECTs cancel. */
3527 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3529 rtx x
= XVECEXP (trueop1
, 0, i
);
3530 if (!CONST_INT_P (x
))
3532 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3533 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3536 return XEXP (trueop0
, 0);
3542 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3543 ? GET_MODE (trueop0
)
3544 : GET_MODE_INNER (mode
));
3545 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3546 ? GET_MODE (trueop1
)
3547 : GET_MODE_INNER (mode
));
3549 gcc_assert (VECTOR_MODE_P (mode
));
3550 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3551 == GET_MODE_SIZE (mode
));
3553 if (VECTOR_MODE_P (op0_mode
))
3554 gcc_assert (GET_MODE_INNER (mode
)
3555 == GET_MODE_INNER (op0_mode
));
3557 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3559 if (VECTOR_MODE_P (op1_mode
))
3560 gcc_assert (GET_MODE_INNER (mode
)
3561 == GET_MODE_INNER (op1_mode
));
3563 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3565 if ((GET_CODE (trueop0
) == CONST_VECTOR
3566 || CONST_SCALAR_INT_P (trueop0
)
3567 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3568 && (GET_CODE (trueop1
) == CONST_VECTOR
3569 || CONST_SCALAR_INT_P (trueop1
)
3570 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3572 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3573 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3574 rtvec v
= rtvec_alloc (n_elts
);
3576 unsigned in_n_elts
= 1;
3578 if (VECTOR_MODE_P (op0_mode
))
3579 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3580 for (i
= 0; i
< n_elts
; i
++)
3584 if (!VECTOR_MODE_P (op0_mode
))
3585 RTVEC_ELT (v
, i
) = trueop0
;
3587 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3591 if (!VECTOR_MODE_P (op1_mode
))
3592 RTVEC_ELT (v
, i
) = trueop1
;
3594 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3599 return gen_rtx_CONST_VECTOR (mode
, v
);
3602 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3603 Restrict the transformation to avoid generating a VEC_SELECT with a
3604 mode unrelated to its operand. */
3605 if (GET_CODE (trueop0
) == VEC_SELECT
3606 && GET_CODE (trueop1
) == VEC_SELECT
3607 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3608 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3610 rtx par0
= XEXP (trueop0
, 1);
3611 rtx par1
= XEXP (trueop1
, 1);
3612 int len0
= XVECLEN (par0
, 0);
3613 int len1
= XVECLEN (par1
, 0);
3614 rtvec vec
= rtvec_alloc (len0
+ len1
);
3615 for (int i
= 0; i
< len0
; i
++)
3616 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3617 for (int i
= 0; i
< len1
; i
++)
3618 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3619 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3620 gen_rtx_PARALLEL (VOIDmode
, vec
));
3633 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
3636 unsigned int width
= GET_MODE_PRECISION (mode
);
3638 if (VECTOR_MODE_P (mode
)
3639 && code
!= VEC_CONCAT
3640 && GET_CODE (op0
) == CONST_VECTOR
3641 && GET_CODE (op1
) == CONST_VECTOR
)
3643 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3644 machine_mode op0mode
= GET_MODE (op0
);
3645 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3646 machine_mode op1mode
= GET_MODE (op1
);
3647 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3648 rtvec v
= rtvec_alloc (n_elts
);
3651 gcc_assert (op0_n_elts
== n_elts
);
3652 gcc_assert (op1_n_elts
== n_elts
);
3653 for (i
= 0; i
< n_elts
; i
++)
3655 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3656 CONST_VECTOR_ELT (op0
, i
),
3657 CONST_VECTOR_ELT (op1
, i
));
3660 RTVEC_ELT (v
, i
) = x
;
3663 return gen_rtx_CONST_VECTOR (mode
, v
);
3666 if (VECTOR_MODE_P (mode
)
3667 && code
== VEC_CONCAT
3668 && (CONST_SCALAR_INT_P (op0
)
3669 || GET_CODE (op0
) == CONST_FIXED
3670 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3671 && (CONST_SCALAR_INT_P (op1
)
3672 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3673 || GET_CODE (op1
) == CONST_FIXED
))
3675 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3676 rtvec v
= rtvec_alloc (n_elts
);
3678 gcc_assert (n_elts
>= 2);
3681 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3682 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3684 RTVEC_ELT (v
, 0) = op0
;
3685 RTVEC_ELT (v
, 1) = op1
;
3689 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3690 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3693 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3694 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3695 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3697 for (i
= 0; i
< op0_n_elts
; ++i
)
3698 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3699 for (i
= 0; i
< op1_n_elts
; ++i
)
3700 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3703 return gen_rtx_CONST_VECTOR (mode
, v
);
3706 if (SCALAR_FLOAT_MODE_P (mode
)
3707 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3708 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3709 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3720 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3722 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3724 for (i
= 0; i
< 4; i
++)
3741 real_from_target (&r
, tmp0
, mode
);
3742 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3746 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3749 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3750 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3751 real_convert (&f0
, mode
, &f0
);
3752 real_convert (&f1
, mode
, &f1
);
3754 if (HONOR_SNANS (mode
)
3755 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3759 && REAL_VALUES_EQUAL (f1
, dconst0
)
3760 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3763 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3764 && flag_trapping_math
3765 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3767 int s0
= REAL_VALUE_NEGATIVE (f0
);
3768 int s1
= REAL_VALUE_NEGATIVE (f1
);
3773 /* Inf + -Inf = NaN plus exception. */
3778 /* Inf - Inf = NaN plus exception. */
3783 /* Inf / Inf = NaN plus exception. */
3790 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3791 && flag_trapping_math
3792 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3793 || (REAL_VALUE_ISINF (f1
)
3794 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3795 /* Inf * 0 = NaN plus exception. */
3798 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3800 real_convert (&result
, mode
, &value
);
3802 /* Don't constant fold this floating point operation if
3803 the result has overflowed and flag_trapping_math. */
3805 if (flag_trapping_math
3806 && MODE_HAS_INFINITIES (mode
)
3807 && REAL_VALUE_ISINF (result
)
3808 && !REAL_VALUE_ISINF (f0
)
3809 && !REAL_VALUE_ISINF (f1
))
3810 /* Overflow plus exception. */
3813 /* Don't constant fold this floating point operation if the
3814 result may dependent upon the run-time rounding mode and
3815 flag_rounding_math is set, or if GCC's software emulation
3816 is unable to accurately represent the result. */
3818 if ((flag_rounding_math
3819 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3820 && (inexact
|| !real_identical (&result
, &value
)))
3823 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3827 /* We can fold some multi-word operations. */
3828 if ((GET_MODE_CLASS (mode
) == MODE_INT
3829 || GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
3830 && CONST_SCALAR_INT_P (op0
)
3831 && CONST_SCALAR_INT_P (op1
))
3835 rtx_mode_t pop0
= std::make_pair (op0
, mode
);
3836 rtx_mode_t pop1
= std::make_pair (op1
, mode
);
3838 #if TARGET_SUPPORTS_WIDE_INT == 0
3839 /* This assert keeps the simplification from producing a result
3840 that cannot be represented in a CONST_DOUBLE but a lot of
3841 upstream callers expect that this function never fails to
3842 simplify something and so you if you added this to the test
3843 above the code would die later anyway. If this assert
3844 happens, you just need to make the port support wide int. */
3845 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
3850 result
= wi::sub (pop0
, pop1
);
3854 result
= wi::add (pop0
, pop1
);
3858 result
= wi::mul (pop0
, pop1
);
3862 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3868 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3874 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3880 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3886 result
= wi::bit_and (pop0
, pop1
);
3890 result
= wi::bit_or (pop0
, pop1
);
3894 result
= wi::bit_xor (pop0
, pop1
);
3898 result
= wi::smin (pop0
, pop1
);
3902 result
= wi::smax (pop0
, pop1
);
3906 result
= wi::umin (pop0
, pop1
);
3910 result
= wi::umax (pop0
, pop1
);
3917 wide_int wop1
= pop1
;
3918 if (SHIFT_COUNT_TRUNCATED
)
3919 wop1
= wi::umod_trunc (wop1
, width
);
3920 else if (wi::geu_p (wop1
, width
))
3926 result
= wi::lrshift (pop0
, wop1
);
3930 result
= wi::arshift (pop0
, wop1
);
3934 result
= wi::lshift (pop0
, wop1
);
3945 if (wi::neg_p (pop1
))
3951 result
= wi::lrotate (pop0
, pop1
);
3955 result
= wi::rrotate (pop0
, pop1
);
3966 return immed_wide_int_const (result
, mode
);
3974 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3977 Rather than test for specific case, we do this by a brute-force method
3978 and do all possible simplifications until no more changes occur. Then
3979 we rebuild the operation. */
3981 struct simplify_plus_minus_op_data
3988 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
3992 result
= (commutative_operand_precedence (y
)
3993 - commutative_operand_precedence (x
));
3997 /* Group together equal REGs to do more simplification. */
3998 if (REG_P (x
) && REG_P (y
))
3999 return REGNO (x
) > REGNO (y
);
4005 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4008 struct simplify_plus_minus_op_data ops
[16];
4011 int changed
, n_constants
, canonicalized
= 0;
4014 memset (ops
, 0, sizeof ops
);
4016 /* Set up the two operands and then expand them until nothing has been
4017 changed. If we run out of room in our array, give up; this should
4018 almost never happen. */
4023 ops
[1].neg
= (code
== MINUS
);
4030 for (i
= 0; i
< n_ops
; i
++)
4032 rtx this_op
= ops
[i
].op
;
4033 int this_neg
= ops
[i
].neg
;
4034 enum rtx_code this_code
= GET_CODE (this_op
);
4040 if (n_ops
== ARRAY_SIZE (ops
))
4043 ops
[n_ops
].op
= XEXP (this_op
, 1);
4044 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4047 ops
[i
].op
= XEXP (this_op
, 0);
4049 canonicalized
|= this_neg
|| i
!= n_ops
- 2;
4053 ops
[i
].op
= XEXP (this_op
, 0);
4054 ops
[i
].neg
= ! this_neg
;
4060 if (n_ops
!= ARRAY_SIZE (ops
)
4061 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4062 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4063 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4065 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4066 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4067 ops
[n_ops
].neg
= this_neg
;
4075 /* ~a -> (-a - 1) */
4076 if (n_ops
!= ARRAY_SIZE (ops
))
4078 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4079 ops
[n_ops
++].neg
= this_neg
;
4080 ops
[i
].op
= XEXP (this_op
, 0);
4081 ops
[i
].neg
= !this_neg
;
4091 ops
[i
].op
= neg_const_int (mode
, this_op
);
4105 if (n_constants
> 1)
4108 gcc_assert (n_ops
>= 2);
4110 /* If we only have two operands, we can avoid the loops. */
4113 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4116 /* Get the two operands. Be careful with the order, especially for
4117 the cases where code == MINUS. */
4118 if (ops
[0].neg
&& ops
[1].neg
)
4120 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4123 else if (ops
[0].neg
)
4134 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4137 /* Now simplify each pair of operands until nothing changes. */
4140 /* Insertion sort is good enough for a small array. */
4141 for (i
= 1; i
< n_ops
; i
++)
4143 struct simplify_plus_minus_op_data save
;
4145 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
4151 ops
[j
+ 1] = ops
[j
];
4152 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
4157 for (i
= n_ops
- 1; i
> 0; i
--)
4158 for (j
= i
- 1; j
>= 0; j
--)
4160 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4161 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4163 if (lhs
!= 0 && rhs
!= 0)
4165 enum rtx_code ncode
= PLUS
;
4171 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4173 else if (swap_commutative_operands_p (lhs
, rhs
))
4174 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4176 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4177 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4179 rtx tem_lhs
, tem_rhs
;
4181 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4182 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4183 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
4185 if (tem
&& !CONSTANT_P (tem
))
4186 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4189 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4193 /* Reject "simplifications" that just wrap the two
4194 arguments in a CONST. Failure to do so can result
4195 in infinite recursion with simplify_binary_operation
4196 when it calls us to simplify CONST operations.
4197 Also, if we find such a simplification, don't try
4198 any more combinations with this rhs: We must have
4199 something like symbol+offset, ie. one of the
4200 trivial CONST expressions we handle later. */
4201 if (GET_CODE (tem
) == CONST
4202 && GET_CODE (XEXP (tem
, 0)) == ncode
4203 && XEXP (XEXP (tem
, 0), 0) == lhs
4204 && XEXP (XEXP (tem
, 0), 1) == rhs
)
4207 if (GET_CODE (tem
) == NEG
)
4208 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4209 if (CONST_INT_P (tem
) && lneg
)
4210 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4214 ops
[j
].op
= NULL_RTX
;
4221 /* If nothing changed, fail. */
4225 /* Pack all the operands to the lower-numbered entries. */
4226 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4236 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4238 && CONST_INT_P (ops
[1].op
)
4239 && CONSTANT_P (ops
[0].op
)
4241 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4243 /* We suppressed creation of trivial CONST expressions in the
4244 combination loop to avoid recursion. Create one manually now.
4245 The combination loop should have ensured that there is exactly
4246 one CONST_INT, and the sort will have ensured that it is last
4247 in the array and that any other constant will be next-to-last. */
4250 && CONST_INT_P (ops
[n_ops
- 1].op
)
4251 && CONSTANT_P (ops
[n_ops
- 2].op
))
4253 rtx value
= ops
[n_ops
- 1].op
;
4254 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4255 value
= neg_const_int (mode
, value
);
4256 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4261 /* Put a non-negated operand first, if possible. */
4263 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4266 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4275 /* Now make the result by performing the requested operations. */
4277 for (i
= 1; i
< n_ops
; i
++)
4278 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4279 mode
, result
, ops
[i
].op
);
4284 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4286 plus_minus_operand_p (const_rtx x
)
4288 return GET_CODE (x
) == PLUS
4289 || GET_CODE (x
) == MINUS
4290 || (GET_CODE (x
) == CONST
4291 && GET_CODE (XEXP (x
, 0)) == PLUS
4292 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4293 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
4296 /* Like simplify_binary_operation except used for relational operators.
4297 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4298 not also be VOIDmode.
4300 CMP_MODE specifies in which mode the comparison is done in, so it is
4301 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4302 the operands or, if both are VOIDmode, the operands are compared in
4303 "infinite precision". */
4305 simplify_relational_operation (enum rtx_code code
, machine_mode mode
,
4306 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4308 rtx tem
, trueop0
, trueop1
;
4310 if (cmp_mode
== VOIDmode
)
4311 cmp_mode
= GET_MODE (op0
);
4312 if (cmp_mode
== VOIDmode
)
4313 cmp_mode
= GET_MODE (op1
);
4315 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4318 if (SCALAR_FLOAT_MODE_P (mode
))
4320 if (tem
== const0_rtx
)
4321 return CONST0_RTX (mode
);
4322 #ifdef FLOAT_STORE_FLAG_VALUE
4324 REAL_VALUE_TYPE val
;
4325 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4326 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
4332 if (VECTOR_MODE_P (mode
))
4334 if (tem
== const0_rtx
)
4335 return CONST0_RTX (mode
);
4336 #ifdef VECTOR_STORE_FLAG_VALUE
4341 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4342 if (val
== NULL_RTX
)
4344 if (val
== const1_rtx
)
4345 return CONST1_RTX (mode
);
4347 units
= GET_MODE_NUNITS (mode
);
4348 v
= rtvec_alloc (units
);
4349 for (i
= 0; i
< units
; i
++)
4350 RTVEC_ELT (v
, i
) = val
;
4351 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
4361 /* For the following tests, ensure const0_rtx is op1. */
4362 if (swap_commutative_operands_p (op0
, op1
)
4363 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4364 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
4366 /* If op0 is a compare, extract the comparison arguments from it. */
4367 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4368 return simplify_gen_relational (code
, mode
, VOIDmode
,
4369 XEXP (op0
, 0), XEXP (op0
, 1));
4371 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4375 trueop0
= avoid_constant_pool_reference (op0
);
4376 trueop1
= avoid_constant_pool_reference (op1
);
4377 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
4381 /* This part of simplify_relational_operation is only used when CMP_MODE
4382 is not in class MODE_CC (i.e. it is a real comparison).
4384 MODE is the mode of the result, while CMP_MODE specifies in which
4385 mode the comparison is done in, so it is the mode of the operands. */
4388 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
4389 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4391 enum rtx_code op0code
= GET_CODE (op0
);
4393 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4395 /* If op0 is a comparison, extract the comparison arguments
4399 if (GET_MODE (op0
) == mode
)
4400 return simplify_rtx (op0
);
4402 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4403 XEXP (op0
, 0), XEXP (op0
, 1));
4405 else if (code
== EQ
)
4407 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
4408 if (new_code
!= UNKNOWN
)
4409 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4410 XEXP (op0
, 0), XEXP (op0
, 1));
4414 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4415 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4416 if ((code
== LTU
|| code
== GEU
)
4417 && GET_CODE (op0
) == PLUS
4418 && CONST_INT_P (XEXP (op0
, 1))
4419 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4420 || rtx_equal_p (op1
, XEXP (op0
, 1)))
4421 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4422 && XEXP (op0
, 1) != const0_rtx
)
4425 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4426 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4427 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4430 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4431 if ((code
== LTU
|| code
== GEU
)
4432 && GET_CODE (op0
) == PLUS
4433 && rtx_equal_p (op1
, XEXP (op0
, 1))
4434 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4435 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4436 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4437 copy_rtx (XEXP (op0
, 0)));
4439 if (op1
== const0_rtx
)
4441 /* Canonicalize (GTU x 0) as (NE x 0). */
4443 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4444 /* Canonicalize (LEU x 0) as (EQ x 0). */
4446 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4448 else if (op1
== const1_rtx
)
4453 /* Canonicalize (GE x 1) as (GT x 0). */
4454 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4457 /* Canonicalize (GEU x 1) as (NE x 0). */
4458 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4461 /* Canonicalize (LT x 1) as (LE x 0). */
4462 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4465 /* Canonicalize (LTU x 1) as (EQ x 0). */
4466 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4472 else if (op1
== constm1_rtx
)
4474 /* Canonicalize (LE x -1) as (LT x 0). */
4476 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4477 /* Canonicalize (GT x -1) as (GE x 0). */
4479 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4482 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4483 if ((code
== EQ
|| code
== NE
)
4484 && (op0code
== PLUS
|| op0code
== MINUS
)
4486 && CONSTANT_P (XEXP (op0
, 1))
4487 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4489 rtx x
= XEXP (op0
, 0);
4490 rtx c
= XEXP (op0
, 1);
4491 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4492 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
4494 /* Detect an infinite recursive condition, where we oscillate at this
4495 simplification case between:
4496 A + B == C <---> C - B == A,
4497 where A, B, and C are all constants with non-simplifiable expressions,
4498 usually SYMBOL_REFs. */
4499 if (GET_CODE (tem
) == invcode
4501 && rtx_equal_p (c
, XEXP (tem
, 1)))
4504 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
4507 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4508 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4510 && op1
== const0_rtx
4511 && GET_MODE_CLASS (mode
) == MODE_INT
4512 && cmp_mode
!= VOIDmode
4513 /* ??? Work-around BImode bugs in the ia64 backend. */
4515 && cmp_mode
!= BImode
4516 && nonzero_bits (op0
, cmp_mode
) == 1
4517 && STORE_FLAG_VALUE
== 1)
4518 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
4519 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
4520 : lowpart_subreg (mode
, op0
, cmp_mode
);
4522 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4523 if ((code
== EQ
|| code
== NE
)
4524 && op1
== const0_rtx
4526 return simplify_gen_relational (code
, mode
, cmp_mode
,
4527 XEXP (op0
, 0), XEXP (op0
, 1));
4529 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4530 if ((code
== EQ
|| code
== NE
)
4532 && rtx_equal_p (XEXP (op0
, 0), op1
)
4533 && !side_effects_p (XEXP (op0
, 0)))
4534 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
4537 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4538 if ((code
== EQ
|| code
== NE
)
4540 && rtx_equal_p (XEXP (op0
, 1), op1
)
4541 && !side_effects_p (XEXP (op0
, 1)))
4542 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4545 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4546 if ((code
== EQ
|| code
== NE
)
4548 && CONST_SCALAR_INT_P (op1
)
4549 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
4550 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4551 simplify_gen_binary (XOR
, cmp_mode
,
4552 XEXP (op0
, 1), op1
));
4554 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4555 if ((code
== EQ
|| code
== NE
)
4556 && GET_CODE (op0
) == BSWAP
4557 && CONST_SCALAR_INT_P (op1
))
4558 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4559 simplify_gen_unary (BSWAP
, cmp_mode
,
4562 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4563 if ((code
== EQ
|| code
== NE
)
4564 && GET_CODE (op0
) == BSWAP
4565 && GET_CODE (op1
) == BSWAP
)
4566 return simplify_gen_relational (code
, mode
, cmp_mode
,
4567 XEXP (op0
, 0), XEXP (op1
, 0));
4569 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
4575 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4576 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
4577 XEXP (op0
, 0), const0_rtx
);
4582 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4583 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
4584 XEXP (op0
, 0), const0_rtx
);
4603 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4604 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4605 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4606 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4607 For floating-point comparisons, assume that the operands were ordered. */
4610 comparison_result (enum rtx_code code
, int known_results
)
4616 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
4619 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
4623 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4626 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4630 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4633 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
4636 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
4638 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
4641 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
4643 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
4646 return const_true_rtx
;
4654 /* Check if the given comparison (done in the given MODE) is actually
4655 a tautology or a contradiction. If the mode is VOID_mode, the
4656 comparison is done in "infinite precision". If no simplification
4657 is possible, this function returns zero. Otherwise, it returns
4658 either const_true_rtx or const0_rtx. */
4661 simplify_const_relational_operation (enum rtx_code code
,
4669 gcc_assert (mode
!= VOIDmode
4670 || (GET_MODE (op0
) == VOIDmode
4671 && GET_MODE (op1
) == VOIDmode
));
4673 /* If op0 is a compare, extract the comparison arguments from it. */
4674 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4676 op1
= XEXP (op0
, 1);
4677 op0
= XEXP (op0
, 0);
4679 if (GET_MODE (op0
) != VOIDmode
)
4680 mode
= GET_MODE (op0
);
4681 else if (GET_MODE (op1
) != VOIDmode
)
4682 mode
= GET_MODE (op1
);
4687 /* We can't simplify MODE_CC values since we don't know what the
4688 actual comparison is. */
4689 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4692 /* Make sure the constant is second. */
4693 if (swap_commutative_operands_p (op0
, op1
))
4695 tem
= op0
, op0
= op1
, op1
= tem
;
4696 code
= swap_condition (code
);
4699 trueop0
= avoid_constant_pool_reference (op0
);
4700 trueop1
= avoid_constant_pool_reference (op1
);
4702 /* For integer comparisons of A and B maybe we can simplify A - B and can
4703 then simplify a comparison of that with zero. If A and B are both either
4704 a register or a CONST_INT, this can't help; testing for these cases will
4705 prevent infinite recursion here and speed things up.
4707 We can only do this for EQ and NE comparisons as otherwise we may
4708 lose or introduce overflow which we cannot disregard as undefined as
4709 we do not know the signedness of the operation on either the left or
4710 the right hand side of the comparison. */
4712 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4713 && (code
== EQ
|| code
== NE
)
4714 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
4715 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
4716 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4717 /* We cannot do this if tem is a nonzero address. */
4718 && ! nonzero_address_p (tem
))
4719 return simplify_const_relational_operation (signed_condition (code
),
4720 mode
, tem
, const0_rtx
);
4722 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4723 return const_true_rtx
;
4725 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4728 /* For modes without NaNs, if the two operands are equal, we know the
4729 result except if they have side-effects. Even with NaNs we know
4730 the result of unordered comparisons and, if signaling NaNs are
4731 irrelevant, also the result of LT/GT/LTGT. */
4732 if ((! HONOR_NANS (GET_MODE (trueop0
))
4733 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4734 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4735 && ! HONOR_SNANS (GET_MODE (trueop0
))))
4736 && rtx_equal_p (trueop0
, trueop1
)
4737 && ! side_effects_p (trueop0
))
4738 return comparison_result (code
, CMP_EQ
);
4740 /* If the operands are floating-point constants, see if we can fold
4742 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
4743 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
4744 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4746 REAL_VALUE_TYPE d0
, d1
;
4748 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
4749 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
4751 /* Comparisons are unordered iff at least one of the values is NaN. */
4752 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
4762 return const_true_rtx
;
4775 return comparison_result (code
,
4776 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
4777 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
4780 /* Otherwise, see if the operands are both integers. */
4781 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4782 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
4784 /* It would be nice if we really had a mode here. However, the
4785 largest int representable on the target is as good as
4787 machine_mode cmode
= (mode
== VOIDmode
) ? MAX_MODE_INT
: mode
;
4788 rtx_mode_t ptrueop0
= std::make_pair (trueop0
, cmode
);
4789 rtx_mode_t ptrueop1
= std::make_pair (trueop1
, cmode
);
4791 if (wi::eq_p (ptrueop0
, ptrueop1
))
4792 return comparison_result (code
, CMP_EQ
);
4795 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
4796 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
4797 return comparison_result (code
, cr
);
4801 /* Optimize comparisons with upper and lower bounds. */
4802 if (HWI_COMPUTABLE_MODE_P (mode
)
4803 && CONST_INT_P (trueop1
))
4806 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, mode
);
4807 HOST_WIDE_INT val
= INTVAL (trueop1
);
4808 HOST_WIDE_INT mmin
, mmax
;
4818 /* Get a reduced range if the sign bit is zero. */
4819 if (nonzero
<= (GET_MODE_MASK (mode
) >> 1))
4826 rtx mmin_rtx
, mmax_rtx
;
4827 get_mode_bounds (mode
, sign
, mode
, &mmin_rtx
, &mmax_rtx
);
4829 mmin
= INTVAL (mmin_rtx
);
4830 mmax
= INTVAL (mmax_rtx
);
4833 unsigned int sign_copies
= num_sign_bit_copies (trueop0
, mode
);
4835 mmin
>>= (sign_copies
- 1);
4836 mmax
>>= (sign_copies
- 1);
4842 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4844 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4845 return const_true_rtx
;
4846 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4851 return const_true_rtx
;
4856 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4858 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4859 return const_true_rtx
;
4860 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4865 return const_true_rtx
;
4871 /* x == y is always false for y out of range. */
4872 if (val
< mmin
|| val
> mmax
)
4876 /* x > y is always false for y >= mmax, always true for y < mmin. */
4878 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4880 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4881 return const_true_rtx
;
4887 return const_true_rtx
;
4890 /* x < y is always false for y <= mmin, always true for y > mmax. */
4892 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4894 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4895 return const_true_rtx
;
4901 return const_true_rtx
;
4905 /* x != y is always true for y out of range. */
4906 if (val
< mmin
|| val
> mmax
)
4907 return const_true_rtx
;
4915 /* Optimize integer comparisons with zero. */
4916 if (trueop1
== const0_rtx
)
4918 /* Some addresses are known to be nonzero. We don't know
4919 their sign, but equality comparisons are known. */
4920 if (nonzero_address_p (trueop0
))
4922 if (code
== EQ
|| code
== LEU
)
4924 if (code
== NE
|| code
== GTU
)
4925 return const_true_rtx
;
4928 /* See if the first operand is an IOR with a constant. If so, we
4929 may be able to determine the result of this comparison. */
4930 if (GET_CODE (op0
) == IOR
)
4932 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
4933 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
4935 int sign_bitnum
= GET_MODE_PRECISION (mode
) - 1;
4936 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
4937 && (UINTVAL (inner_const
)
4938 & ((unsigned HOST_WIDE_INT
) 1
4948 return const_true_rtx
;
4952 return const_true_rtx
;
4966 /* Optimize comparison of ABS with zero. */
4967 if (trueop1
== CONST0_RTX (mode
)
4968 && (GET_CODE (trueop0
) == ABS
4969 || (GET_CODE (trueop0
) == FLOAT_EXTEND
4970 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
4975 /* Optimize abs(x) < 0.0. */
4976 if (!HONOR_SNANS (mode
)
4977 && (!INTEGRAL_MODE_P (mode
)
4978 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4980 if (INTEGRAL_MODE_P (mode
)
4981 && (issue_strict_overflow_warning
4982 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4983 warning (OPT_Wstrict_overflow
,
4984 ("assuming signed overflow does not occur when "
4985 "assuming abs (x) < 0 is false"));
4991 /* Optimize abs(x) >= 0.0. */
4992 if (!HONOR_NANS (mode
)
4993 && (!INTEGRAL_MODE_P (mode
)
4994 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4996 if (INTEGRAL_MODE_P (mode
)
4997 && (issue_strict_overflow_warning
4998 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4999 warning (OPT_Wstrict_overflow
,
5000 ("assuming signed overflow does not occur when "
5001 "assuming abs (x) >= 0 is true"));
5002 return const_true_rtx
;
5007 /* Optimize ! (abs(x) < 0.0). */
5008 return const_true_rtx
;
5018 /* Simplify CODE, an operation with result mode MODE and three operands,
5019 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5020 a constant. Return 0 if no simplifications is possible. */
5023 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5024 machine_mode op0_mode
, rtx op0
, rtx op1
,
5027 unsigned int width
= GET_MODE_PRECISION (mode
);
5028 bool any_change
= false;
5031 /* VOIDmode means "infinite" precision. */
5033 width
= HOST_BITS_PER_WIDE_INT
;
5038 /* Simplify negations around the multiplication. */
5039 /* -a * -b + c => a * b + c. */
5040 if (GET_CODE (op0
) == NEG
)
5042 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5044 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5046 else if (GET_CODE (op1
) == NEG
)
5048 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5050 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5053 /* Canonicalize the two multiplication operands. */
5054 /* a * -b + c => -b * a + c. */
5055 if (swap_commutative_operands_p (op0
, op1
))
5056 tem
= op0
, op0
= op1
, op1
= tem
, any_change
= true;
5059 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5064 if (CONST_INT_P (op0
)
5065 && CONST_INT_P (op1
)
5066 && CONST_INT_P (op2
)
5067 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
5068 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
5070 /* Extracting a bit-field from a constant */
5071 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5072 HOST_WIDE_INT op1val
= INTVAL (op1
);
5073 HOST_WIDE_INT op2val
= INTVAL (op2
);
5074 if (BITS_BIG_ENDIAN
)
5075 val
>>= GET_MODE_PRECISION (op0_mode
) - op2val
- op1val
;
5079 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5081 /* First zero-extend. */
5082 val
&= ((unsigned HOST_WIDE_INT
) 1 << op1val
) - 1;
5083 /* If desired, propagate sign bit. */
5084 if (code
== SIGN_EXTRACT
5085 && (val
& ((unsigned HOST_WIDE_INT
) 1 << (op1val
- 1)))
5087 val
|= ~ (((unsigned HOST_WIDE_INT
) 1 << op1val
) - 1);
5090 return gen_int_mode (val
, mode
);
5095 if (CONST_INT_P (op0
))
5096 return op0
!= const0_rtx
? op1
: op2
;
5098 /* Convert c ? a : a into "a". */
5099 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5102 /* Convert a != b ? a : b into "a". */
5103 if (GET_CODE (op0
) == NE
5104 && ! side_effects_p (op0
)
5105 && ! HONOR_NANS (mode
)
5106 && ! HONOR_SIGNED_ZEROS (mode
)
5107 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5108 && rtx_equal_p (XEXP (op0
, 1), op2
))
5109 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5110 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5113 /* Convert a == b ? a : b into "b". */
5114 if (GET_CODE (op0
) == EQ
5115 && ! side_effects_p (op0
)
5116 && ! HONOR_NANS (mode
)
5117 && ! HONOR_SIGNED_ZEROS (mode
)
5118 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5119 && rtx_equal_p (XEXP (op0
, 1), op2
))
5120 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5121 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5124 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5126 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5127 ? GET_MODE (XEXP (op0
, 1))
5128 : GET_MODE (XEXP (op0
, 0)));
5131 /* Look for happy constants in op1 and op2. */
5132 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5134 HOST_WIDE_INT t
= INTVAL (op1
);
5135 HOST_WIDE_INT f
= INTVAL (op2
);
5137 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5138 code
= GET_CODE (op0
);
5139 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5142 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
5150 return simplify_gen_relational (code
, mode
, cmp_mode
,
5151 XEXP (op0
, 0), XEXP (op0
, 1));
5154 if (cmp_mode
== VOIDmode
)
5155 cmp_mode
= op0_mode
;
5156 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5157 cmp_mode
, XEXP (op0
, 0),
5160 /* See if any simplifications were possible. */
5163 if (CONST_INT_P (temp
))
5164 return temp
== const0_rtx
? op2
: op1
;
5166 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5172 gcc_assert (GET_MODE (op0
) == mode
);
5173 gcc_assert (GET_MODE (op1
) == mode
);
5174 gcc_assert (VECTOR_MODE_P (mode
));
5175 trueop2
= avoid_constant_pool_reference (op2
);
5176 if (CONST_INT_P (trueop2
))
5178 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
5179 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
5180 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
5181 unsigned HOST_WIDE_INT mask
;
5182 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
5185 mask
= ((unsigned HOST_WIDE_INT
) 1 << n_elts
) - 1;
5187 if (!(sel
& mask
) && !side_effects_p (op0
))
5189 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
5192 rtx trueop0
= avoid_constant_pool_reference (op0
);
5193 rtx trueop1
= avoid_constant_pool_reference (op1
);
5194 if (GET_CODE (trueop0
) == CONST_VECTOR
5195 && GET_CODE (trueop1
) == CONST_VECTOR
)
5197 rtvec v
= rtvec_alloc (n_elts
);
5200 for (i
= 0; i
< n_elts
; i
++)
5201 RTVEC_ELT (v
, i
) = ((sel
& ((unsigned HOST_WIDE_INT
) 1 << i
))
5202 ? CONST_VECTOR_ELT (trueop0
, i
)
5203 : CONST_VECTOR_ELT (trueop1
, i
));
5204 return gen_rtx_CONST_VECTOR (mode
, v
);
5207 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5208 if no element from a appears in the result. */
5209 if (GET_CODE (op0
) == VEC_MERGE
)
5211 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
5212 if (CONST_INT_P (tem
))
5214 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
5215 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
5216 return simplify_gen_ternary (code
, mode
, mode
,
5217 XEXP (op0
, 1), op1
, op2
);
5218 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
5219 return simplify_gen_ternary (code
, mode
, mode
,
5220 XEXP (op0
, 0), op1
, op2
);
5223 if (GET_CODE (op1
) == VEC_MERGE
)
5225 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
5226 if (CONST_INT_P (tem
))
5228 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
5229 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
5230 return simplify_gen_ternary (code
, mode
, mode
,
5231 op0
, XEXP (op1
, 1), op2
);
5232 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
5233 return simplify_gen_ternary (code
, mode
, mode
,
5234 op0
, XEXP (op1
, 0), op2
);
5239 if (rtx_equal_p (op0
, op1
)
5240 && !side_effects_p (op2
) && !side_effects_p (op1
))
5252 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5253 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5254 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5256 Works by unpacking OP into a collection of 8-bit values
5257 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5258 and then repacking them again for OUTERMODE. */
5261 simplify_immed_subreg (machine_mode outermode
, rtx op
,
5262 machine_mode innermode
, unsigned int byte
)
5266 value_mask
= (1 << value_bit
) - 1
5268 unsigned char value
[MAX_BITSIZE_MODE_ANY_MODE
/ value_bit
];
5277 rtvec result_v
= NULL
;
5278 enum mode_class outer_class
;
5279 machine_mode outer_submode
;
5282 /* Some ports misuse CCmode. */
5283 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
5286 /* We have no way to represent a complex constant at the rtl level. */
5287 if (COMPLEX_MODE_P (outermode
))
5290 /* We support any size mode. */
5291 max_bitsize
= MAX (GET_MODE_BITSIZE (outermode
),
5292 GET_MODE_BITSIZE (innermode
));
5294 /* Unpack the value. */
5296 if (GET_CODE (op
) == CONST_VECTOR
)
5298 num_elem
= CONST_VECTOR_NUNITS (op
);
5299 elems
= &CONST_VECTOR_ELT (op
, 0);
5300 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
5306 elem_bitsize
= max_bitsize
;
5308 /* If this asserts, it is too complicated; reducing value_bit may help. */
5309 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
5310 /* I don't know how to handle endianness of sub-units. */
5311 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
5313 for (elem
= 0; elem
< num_elem
; elem
++)
5316 rtx el
= elems
[elem
];
5318 /* Vectors are kept in target memory order. (This is probably
5321 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5322 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5324 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5325 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5326 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5327 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5328 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5331 switch (GET_CODE (el
))
5335 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5337 *vp
++ = INTVAL (el
) >> i
;
5338 /* CONST_INTs are always logically sign-extended. */
5339 for (; i
< elem_bitsize
; i
+= value_bit
)
5340 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
5343 case CONST_WIDE_INT
:
5345 rtx_mode_t val
= std::make_pair (el
, innermode
);
5346 unsigned char extend
= wi::sign_mask (val
);
5348 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5349 *vp
++ = wi::extract_uhwi (val
, i
, value_bit
);
5350 for (; i
< elem_bitsize
; i
+= value_bit
)
5356 if (TARGET_SUPPORTS_WIDE_INT
== 0 && GET_MODE (el
) == VOIDmode
)
5358 unsigned char extend
= 0;
5359 /* If this triggers, someone should have generated a
5360 CONST_INT instead. */
5361 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
5363 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5364 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
5365 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
5368 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
5372 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
5374 for (; i
< elem_bitsize
; i
+= value_bit
)
5379 /* This is big enough for anything on the platform. */
5380 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
5381 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
5383 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
5384 gcc_assert (bitsize
<= elem_bitsize
);
5385 gcc_assert (bitsize
% value_bit
== 0);
5387 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
5390 /* real_to_target produces its result in words affected by
5391 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5392 and use WORDS_BIG_ENDIAN instead; see the documentation
5393 of SUBREG in rtl.texi. */
5394 for (i
= 0; i
< bitsize
; i
+= value_bit
)
5397 if (WORDS_BIG_ENDIAN
)
5398 ibase
= bitsize
- 1 - i
;
5401 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
5404 /* It shouldn't matter what's done here, so fill it with
5406 for (; i
< elem_bitsize
; i
+= value_bit
)
5412 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5414 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5415 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5419 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5420 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5421 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
5423 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
5424 >> (i
- HOST_BITS_PER_WIDE_INT
);
5425 for (; i
< elem_bitsize
; i
+= value_bit
)
5435 /* Now, pick the right byte to start with. */
5436 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5437 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5438 will already have offset 0. */
5439 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
5441 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
5443 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5444 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5445 byte
= (subword_byte
% UNITS_PER_WORD
5446 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5449 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5450 so if it's become negative it will instead be very large.) */
5451 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5453 /* Convert from bytes to chunks of size value_bit. */
5454 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
5456 /* Re-pack the value. */
5458 if (VECTOR_MODE_P (outermode
))
5460 num_elem
= GET_MODE_NUNITS (outermode
);
5461 result_v
= rtvec_alloc (num_elem
);
5462 elems
= &RTVEC_ELT (result_v
, 0);
5463 outer_submode
= GET_MODE_INNER (outermode
);
5469 outer_submode
= outermode
;
5472 outer_class
= GET_MODE_CLASS (outer_submode
);
5473 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
5475 gcc_assert (elem_bitsize
% value_bit
== 0);
5476 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
5478 for (elem
= 0; elem
< num_elem
; elem
++)
5482 /* Vectors are stored in target memory order. (This is probably
5485 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5486 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5488 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5489 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5490 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5491 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5492 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5495 switch (outer_class
)
5498 case MODE_PARTIAL_INT
:
5503 = (GET_MODE_BITSIZE (outer_submode
) + HOST_BITS_PER_WIDE_INT
- 1)
5504 / HOST_BITS_PER_WIDE_INT
;
5505 HOST_WIDE_INT tmp
[MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_WIDE_INT
];
5508 for (u
= 0; u
< units
; u
++)
5510 unsigned HOST_WIDE_INT buf
= 0;
5512 i
< HOST_BITS_PER_WIDE_INT
&& base
+ i
< elem_bitsize
;
5514 buf
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5517 base
+= HOST_BITS_PER_WIDE_INT
;
5519 gcc_assert (GET_MODE_PRECISION (outer_submode
)
5520 <= MAX_BITSIZE_MODE_ANY_INT
);
5521 r
= wide_int::from_array (tmp
, units
,
5522 GET_MODE_PRECISION (outer_submode
));
5523 elems
[elem
] = immed_wide_int_const (r
, outer_submode
);
5528 case MODE_DECIMAL_FLOAT
:
5531 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
5533 /* real_from_target wants its input in words affected by
5534 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5535 and use WORDS_BIG_ENDIAN instead; see the documentation
5536 of SUBREG in rtl.texi. */
5537 for (i
= 0; i
< max_bitsize
/ 32; i
++)
5539 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5542 if (WORDS_BIG_ENDIAN
)
5543 ibase
= elem_bitsize
- 1 - i
;
5546 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
5549 real_from_target (&r
, tmp
, outer_submode
);
5550 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
5562 f
.mode
= outer_submode
;
5565 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5567 f
.data
.low
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5568 for (; i
< elem_bitsize
; i
+= value_bit
)
5569 f
.data
.high
|= ((unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
5570 << (i
- HOST_BITS_PER_WIDE_INT
));
5572 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
5580 if (VECTOR_MODE_P (outermode
))
5581 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
5586 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5587 Return 0 if no simplifications are possible. */
5589 simplify_subreg (machine_mode outermode
, rtx op
,
5590 machine_mode innermode
, unsigned int byte
)
5592 /* Little bit of sanity checking. */
5593 gcc_assert (innermode
!= VOIDmode
);
5594 gcc_assert (outermode
!= VOIDmode
);
5595 gcc_assert (innermode
!= BLKmode
);
5596 gcc_assert (outermode
!= BLKmode
);
5598 gcc_assert (GET_MODE (op
) == innermode
5599 || GET_MODE (op
) == VOIDmode
);
5601 if ((byte
% GET_MODE_SIZE (outermode
)) != 0)
5604 if (byte
>= GET_MODE_SIZE (innermode
))
5607 if (outermode
== innermode
&& !byte
)
5610 if (CONST_SCALAR_INT_P (op
)
5611 || CONST_DOUBLE_AS_FLOAT_P (op
)
5612 || GET_CODE (op
) == CONST_FIXED
5613 || GET_CODE (op
) == CONST_VECTOR
)
5614 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
5616 /* Changing mode twice with SUBREG => just change it once,
5617 or not at all if changing back op starting mode. */
5618 if (GET_CODE (op
) == SUBREG
)
5620 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
5621 int final_offset
= byte
+ SUBREG_BYTE (op
);
5624 if (outermode
== innermostmode
5625 && byte
== 0 && SUBREG_BYTE (op
) == 0)
5626 return SUBREG_REG (op
);
5628 /* The SUBREG_BYTE represents offset, as if the value were stored
5629 in memory. Irritating exception is paradoxical subreg, where
5630 we define SUBREG_BYTE to be 0. On big endian machines, this
5631 value should be negative. For a moment, undo this exception. */
5632 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5634 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
5635 if (WORDS_BIG_ENDIAN
)
5636 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5637 if (BYTES_BIG_ENDIAN
)
5638 final_offset
+= difference
% UNITS_PER_WORD
;
5640 if (SUBREG_BYTE (op
) == 0
5641 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
5643 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
5644 if (WORDS_BIG_ENDIAN
)
5645 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5646 if (BYTES_BIG_ENDIAN
)
5647 final_offset
+= difference
% UNITS_PER_WORD
;
5650 /* See whether resulting subreg will be paradoxical. */
5651 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
5653 /* In nonparadoxical subregs we can't handle negative offsets. */
5654 if (final_offset
< 0)
5656 /* Bail out in case resulting subreg would be incorrect. */
5657 if (final_offset
% GET_MODE_SIZE (outermode
)
5658 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
5664 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
5666 /* In paradoxical subreg, see if we are still looking on lower part.
5667 If so, our SUBREG_BYTE will be 0. */
5668 if (WORDS_BIG_ENDIAN
)
5669 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5670 if (BYTES_BIG_ENDIAN
)
5671 offset
+= difference
% UNITS_PER_WORD
;
5672 if (offset
== final_offset
)
5678 /* Recurse for further possible simplifications. */
5679 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
5683 if (validate_subreg (outermode
, innermostmode
,
5684 SUBREG_REG (op
), final_offset
))
5686 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
5687 if (SUBREG_PROMOTED_VAR_P (op
)
5688 && SUBREG_PROMOTED_SIGN (op
) >= 0
5689 && GET_MODE_CLASS (outermode
) == MODE_INT
5690 && IN_RANGE (GET_MODE_SIZE (outermode
),
5691 GET_MODE_SIZE (innermode
),
5692 GET_MODE_SIZE (innermostmode
))
5693 && subreg_lowpart_p (newx
))
5695 SUBREG_PROMOTED_VAR_P (newx
) = 1;
5696 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
5703 /* SUBREG of a hard register => just change the register number
5704 and/or mode. If the hard register is not valid in that mode,
5705 suppress this simplification. If the hard register is the stack,
5706 frame, or argument pointer, leave this as a SUBREG. */
5708 if (REG_P (op
) && HARD_REGISTER_P (op
))
5710 unsigned int regno
, final_regno
;
5713 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
5714 if (HARD_REGISTER_NUM_P (final_regno
))
5717 int final_offset
= byte
;
5719 /* Adjust offset for paradoxical subregs. */
5721 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5723 int difference
= (GET_MODE_SIZE (innermode
)
5724 - GET_MODE_SIZE (outermode
));
5725 if (WORDS_BIG_ENDIAN
)
5726 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5727 if (BYTES_BIG_ENDIAN
)
5728 final_offset
+= difference
% UNITS_PER_WORD
;
5731 x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, final_offset
);
5733 /* Propagate original regno. We don't have any way to specify
5734 the offset inside original regno, so do so only for lowpart.
5735 The information is used only by alias analysis that can not
5736 grog partial register anyway. */
5738 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
5739 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
5744 /* If we have a SUBREG of a register that we are replacing and we are
5745 replacing it with a MEM, make a new MEM and try replacing the
5746 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5747 or if we would be widening it. */
5750 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
5751 /* Allow splitting of volatile memory references in case we don't
5752 have instruction to move the whole thing. */
5753 && (! MEM_VOLATILE_P (op
)
5754 || ! have_insn_for (SET
, innermode
))
5755 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
5756 return adjust_address_nv (op
, outermode
, byte
);
5758 /* Handle complex values represented as CONCAT
5759 of real and imaginary part. */
5760 if (GET_CODE (op
) == CONCAT
)
5762 unsigned int part_size
, final_offset
;
5765 part_size
= GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)));
5766 if (byte
< part_size
)
5768 part
= XEXP (op
, 0);
5769 final_offset
= byte
;
5773 part
= XEXP (op
, 1);
5774 final_offset
= byte
- part_size
;
5777 if (final_offset
+ GET_MODE_SIZE (outermode
) > part_size
)
5780 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
5783 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
5784 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
5788 /* A SUBREG resulting from a zero extension may fold to zero if
5789 it extracts higher bits that the ZERO_EXTEND's source bits. */
5790 if (GET_CODE (op
) == ZERO_EXTEND
&& SCALAR_INT_MODE_P (innermode
))
5792 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
5793 if (bitpos
>= GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0))))
5794 return CONST0_RTX (outermode
);
5797 if (SCALAR_INT_MODE_P (outermode
)
5798 && SCALAR_INT_MODE_P (innermode
)
5799 && GET_MODE_PRECISION (outermode
) < GET_MODE_PRECISION (innermode
)
5800 && byte
== subreg_lowpart_offset (outermode
, innermode
))
5802 rtx tem
= simplify_truncation (outermode
, op
, innermode
);
5810 /* Make a SUBREG operation or equivalent if it folds. */
5813 simplify_gen_subreg (machine_mode outermode
, rtx op
,
5814 machine_mode innermode
, unsigned int byte
)
5818 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
5822 if (GET_CODE (op
) == SUBREG
5823 || GET_CODE (op
) == CONCAT
5824 || GET_MODE (op
) == VOIDmode
)
5827 if (validate_subreg (outermode
, innermode
, op
, byte
))
5828 return gen_rtx_SUBREG (outermode
, op
, byte
);
5833 /* Simplify X, an rtx expression.
5835 Return the simplified expression or NULL if no simplifications
5838 This is the preferred entry point into the simplification routines;
5839 however, we still allow passes to call the more specific routines.
5841 Right now GCC has three (yes, three) major bodies of RTL simplification
5842 code that need to be unified.
5844 1. fold_rtx in cse.c. This code uses various CSE specific
5845 information to aid in RTL simplification.
5847 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5848 it uses combine specific information to aid in RTL
5851 3. The routines in this file.
5854 Long term we want to only have one body of simplification code; to
5855 get to that state I recommend the following steps:
5857 1. Pour over fold_rtx & simplify_rtx and move any simplifications
5858 which are not pass dependent state into these routines.
5860 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5861 use this routine whenever possible.
5863 3. Allow for pass dependent state to be provided to these
5864 routines and add simplifications based on the pass dependent
5865 state. Remove code from cse.c & combine.c that becomes
5868 It will take time, but ultimately the compiler will be easier to
5869 maintain and improve. It's totally silly that when we add a
5870 simplification that it needs to be added to 4 places (3 for RTL
5871 simplification and 1 for tree simplification. */
5874 simplify_rtx (const_rtx x
)
5876 const enum rtx_code code
= GET_CODE (x
);
5877 const machine_mode mode
= GET_MODE (x
);
5879 switch (GET_RTX_CLASS (code
))
5882 return simplify_unary_operation (code
, mode
,
5883 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
5884 case RTX_COMM_ARITH
:
5885 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
5886 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
5888 /* Fall through.... */
5891 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
5894 case RTX_BITFIELD_OPS
:
5895 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
5896 XEXP (x
, 0), XEXP (x
, 1),
5900 case RTX_COMM_COMPARE
:
5901 return simplify_relational_operation (code
, mode
,
5902 ((GET_MODE (XEXP (x
, 0))
5904 ? GET_MODE (XEXP (x
, 0))
5905 : GET_MODE (XEXP (x
, 1))),
5911 return simplify_subreg (mode
, SUBREG_REG (x
),
5912 GET_MODE (SUBREG_REG (x
)),
5919 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5920 if (GET_CODE (XEXP (x
, 0)) == HIGH
5921 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))