/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
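/* Illustrative note (not part of the original file): with a 64-bit
   HOST_WIDE_INT, a 128-bit value is held as (low, high); e.g. -2 is
   (low = 0xfffffffffffffffe, high = HWI_SIGN_EXTEND (low) = -1),
   while 2 is (low = 2, high = HWI_SIGN_EXTEND (low) = 0).  */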
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
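/* Illustrative note (not from the original source): with a 64-bit
   HOST_WIDE_INT, negating the most negative INTVAL (0x8000000000000000)
   as a signed value would be undefined behavior; the unsigned negation
   above is well defined, and gen_int_mode truncates the result back
   into MODE, so e.g. the most negative QImode constant negates to
   itself rather than overflowing.  */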
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
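/* Illustrative example (not in the original source): for SImode the
   sign bit is 1 << 31, so

     mode_signbit_p (SImode,
		     gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode))

   returns true, while any other single-bit constant yields false.  */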
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
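/* Usage sketch (illustrative, not from the original file): callers
   build folded RTL with e.g.

     rtx sum = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);

   which simply returns REG, because the fold of x + 0 succeeds for an
   integer mode, instead of allocating a fresh
   (plus:SI reg (const_int 0)).  */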
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
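/* Illustrative example (not part of the original file): if X is a MEM
   whose address is a SYMBOL_REF into the constant pool, and the pool
   entry holds (const_int 42) in the same SImode, the function returns
   (const_int 42) directly; if the constant cannot be recovered in the
   requested mode, X itself is returned unchanged.  */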
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
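/* Usage sketch (illustrative, not from the original file): to
   substitute a known constant for a register and fold the result,

     rtx folded = simplify_replace_rtx (pattern, reg, GEN_INT (4));

   turns e.g. (plus:SI (reg:SI R) (const_int -4)) into (const_int 0),
   because the recursive walk rebuilds each subexpression through
   simplify_gen_binary and friends.  */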
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
#ifdef WORD_REGISTER_OPERATIONS
      && precision >= BITS_PER_WORD
#endif
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
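/* Worked example (illustrative, not from the original file): truncating
   a QImode value that was zero-extended to SImode and shifted left by 2,

     (truncate:QI (ashift:SI (zero_extend:SI (reg:QI x)) (const_int 2)))

   is folded by the ASHIFT case above into
   (ashift:QI (reg:QI x) (const_int 2)), since the shift count (2) is
   less than the 8-bit precision and only the low 8 bits survive.  */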
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
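/* Usage sketch (illustrative, not from the original file):

     simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode)

   folds to (const_int -5) through the constant path, while a NEG of a
   non-constant operand falls through to simplify_unary_operation_1
   below for the structural simplifications.  */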
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);


      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /*  (float_extend (float_extend x)) is (float_extend x)

	  (float_extend (float x)) is (float x) assuming that double
	  rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_PRECISION (mode)
		      > GET_MODE_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
	  && GET_MODE_PRECISION (GET_MODE (op))
	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	     <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (mode)
	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (mode)
	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
				     GET_MODE (SUBREG_REG (op)));
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
				rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert happens,
	 you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
	    int_value = GET_MODE_PRECISION (mode);
	  result = wi::shwi (int_value, mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
	    int_value = GET_MODE_PRECISION (mode);
	  result = wi::shwi (int_value, mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	case SQRT:
	default:
	  return 0;
	}

      return immed_wide_int_const (result, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (REAL_VALUES_LESS (t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (REAL_VALUES_LESS (x, t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (&x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (REAL_VALUES_LESS (t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (&x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
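/* Illustrative example (not from the original file): with CODE == AND
   in SImode,

     (and:SI (bswap:SI x) (const_int 0xff))

   becomes (bswap:SI (and:SI x (const_int 0xff000000))), since the
   byte-swapped mask selects the same byte; moving the constant through
   the swap lets a later pass cancel the BSWAP entirely.  */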
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
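/* Illustrative example (not from the original file): with CODE == PLUS,

     (plus (plus (reg) (const_int 1)) (const_int 2))

   hits the "(a op b) op c" -> "a op (b op c)" attempt above: the two
   constants fold to (const_int 3), and the result is rebuilt as
   (plus (reg) (const_int 3)).  */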
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
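/* Illustrative example (not from the original file): commutative inputs
   are canonicalized before dispatch, so

     simplify_binary_operation (PLUS, SImode, GEN_INT (7), reg)

   is handled as reg + 7 (constant second); with two constant operands,
   e.g. 7 and 9, the constant path returns (const_int 16) directly.  */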
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));
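      /* For example (illustrative): in SImode, (plus (not x) (const_int 1))
	 simplifies via the (~a) + 1 -> -a rule above to (neg x), and
	 (plus (symbol_ref "foo") (const_int 4)) folds to
	 (const (plus (symbol_ref "foo") (const_int 4))).  */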
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = std::make_pair (XEXP (rhs, 1), mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : NULL_RTX;
	    }
	}
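      /* For example (illustrative): (plus (ashift x (const_int 2)) x)
	 gives coeff0 == 4 and coeff1 == 1, so the block above rebuilds
	 it as (mult x (const_int 5)) and keeps that form only if it is
	 no more expensive than the original.  */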
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and
	 STORE_FLAG_VALUE is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : NULL_RTX;
	    }
	}
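      /* For example (illustrative): (minus (mult x (const_int 3)) x)
	 gives coeff0 == 3 and negcoeff1 == -1, so the block above
	 rebuilds it as (mult x (const_int 2)), again subject to the
	 cost comparison.  */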
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
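      /* The simplify_plus_minus call above, for example (illustrative),
	 flattens (minus a (plus b c)) into the operand list
	 { +a, -b, -c } and rebuilds it as (minus (minus a b) c).  */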
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (std::make_pair (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
	}

      /* x*2 is x+x and x*(-1) is -x  */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode,
				    XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode,
				    XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
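      /* For example (illustrative): in SImode, (mult x (const_int 8))
	 becomes (ashift x (const_int 3)) via the power-of-two rule
	 above.  */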
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;

      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
	      + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));
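      /* For example (illustrative): in SImode,
	 (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
	 has 24 + 8 == 32 == GET_MODE_PRECISION (SImode), so it is
	 rewritten as (rotate x (const_int 24)).  */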
      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	{
	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (UINTVAL (XEXP (op0, 1))
						       & ~UINTVAL (op1),
						       mode));
	  return simplify_gen_binary (IOR, mode, tmp, op1);
	}

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode,
							  op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  rtx na_c
	    = simplify_binary_operation (AND, mode,
					 simplify_gen_unary (NOT, mode,
							     a, mode),
					 c);
	  if ((~cval & bval) == 0)
	    {
	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval,
							  mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (na_c == const0_rtx)
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
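      /* For example (illustrative): in SImode,
	 (xor x (const_int -2147483648)) is canonicalized by the
	 sign-bit rule above to (plus x (const_int -2147483648)):
	 flipping the sign bit and adding it are equivalent in modular
	 arithmetic.  */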
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}

      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X))) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 1)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 1)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
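      /* For example (illustrative): (and (xor a b) a) becomes
	 (and (not b) a) via the rules above; with b constant the NOT
	 folds and the result is often a single machine insn.  */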
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
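      /* For example (illustrative): (udiv x (const_int 16)) becomes
	 (lshiftrt x (const_int 4)), since 16 is an exact power of two.  */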
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (tem)
		return tem;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (x)
		return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
      break;
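      /* For example (illustrative): (umod x (const_int 8)) becomes
	 (and x (const_int 7)) via the power-of-two rule above.  */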
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_PRECISION (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				    mode, op0,
				    GEN_INT (GET_MODE_PRECISION (mode)
					     - INTVAL (trueop1)));
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

    canonicalize_shift:
      /* Given:
	 scalar modes M1, M2
	 scalar constants c1, c2
	 size (M2) > size (M1)
	 c1 == size (M2) - size (M1)
	 optimize:
	 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
				 <low_part>)
		      (const_int <c2>))
	 to:
	 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
		    <low_part>).  */
      if (code == ASHIFTRT
	  && !VECTOR_MODE_P (mode)
	  && GET_CODE (op0) == SUBREG
	  && CONST_INT_P (op1)
	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
	  && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
	      > GET_MODE_BITSIZE (mode))
	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
	      == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
		  - GET_MODE_BITSIZE (mode)))
	  && subreg_lowpart_p (op0))
	{
	  rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
			     + INTVAL (op1));
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	  tmp = simplify_gen_binary (ASHIFTRT,
				     GET_MODE (SUBREG_REG (op0)),
				     XEXP (SUBREG_REG (op0), 0),
				     tmp);
	  return simplify_gen_subreg (mode, tmp, inner_mode,
				      subreg_lowpart_offset (mode,
							     inner_mode));
	}

      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;
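      /* For example (illustrative): with SHIFT_COUNT_TRUNCATED, an SImode
	 (ashiftrt x (const_int 33)) is rewritten with the count masked to
	 33 & 31 == 1; likewise (rotate x (const_int 30)) in SImode is
	 canonicalized above to (rotatert x (const_int 2)).  */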
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT) width)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
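      /* For example (illustrative): in SImode,
	 (smin x (const_int -2147483648)) folds to the constant, since
	 no signed value is smaller than the sign bit, and
	 (umax x (const_int -1)) folds to (const_int -1), the all-ones
	 (maximal unsigned) value.  */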
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));

	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract scalar element from a vector using chain of
	     nested VEC_SELECT expressions.  When input operand is a memory
	     operand, this operation can be simplified to a simple scalar
	     load from an offseted memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select correct operand of VEC_CONCAT
		     and adjust selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }

	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Recognize the identity.  */
	  if (GET_MODE (trueop0) == mode)
	    {
	      bool maybe_ident = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (j) || INTVAL (j) != i)
		    {
		      maybe_ident = false;
		      break;
		    }
		}
	      if (maybe_ident)
		return trueop0;
	    }

	  /* If we build {a,b} then permute it, build the result directly.  */
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 0)) == mode
	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 1)) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 4 && i1 < 4);
	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_MODE (trueop0) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 2 && i1 < 2);
	      subop0 = XEXP (trueop0, i0);
	      subop1 = XEXP (trueop0, i1);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  /* If we select one half of a vec_concat, return that.  */
	  if (GET_CODE (trueop0) == VEC_CONCAT
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
	    {
	      rtx subop0 = XEXP (trueop0, 0);
	      rtx subop1 = XEXP (trueop0, 1);
	      machine_mode mode0 = GET_MODE (subop0);
	      machine_mode mode1 = GET_MODE (subop1);
	      int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
	      int l0 = GET_MODE_SIZE (mode0) / li;
	      int l1 = GET_MODE_SIZE (mode1) / li;
	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
		{
		  bool success = true;
		  for (int i = 1; i < l0; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop0;
		}
	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
		{
		  bool success = true;
		  for (int i = 1; i < l1; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop1;
		}
	    }
	}
      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size
		= GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}

      /* If we select elements in a vec_merge that all come from the same
	 operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
	{
	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
	  if (CONST_INT_P (trueop02))
	    {
	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
	      bool all_operand0 = true;
	      bool all_operand1 = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (sel & (1 << UINTVAL (j)))
		    all_operand1 = false;
		  else
		    all_operand0 = false;
		}
	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
		return simplify_gen_binary (VEC_SELECT, mode,
					    XEXP (op0, 0), op1);
	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
		return simplify_gen_binary (VEC_SELECT, mode,
					    XEXP (op0, 1), op1);
	    }
	}

      /* If we have two nested selects that are inverses of each
	 other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
	  && GET_MODE (XEXP (trueop0, 0)) == mode)
	{
	  rtx op0_subop1 = XEXP (trueop0, 1);
	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

	  /* Apply the outer ordering vector to the inner one.  (The inner
	     ordering vector is expressly permitted to be of a different
	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
	     then the two VEC_SELECTs cancel.  */
	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
	    {
	      rtx x = XVECEXP (trueop1, 0, i);
	      if (!CONST_INT_P (x))
		return 0;
	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
	      if (!CONST_INT_P (y) || i != INTVAL (y))
		return 0;
	    }
	  return XEXP (trueop0, 0);
	}

      return 0;
    case VEC_CONCAT:
      {
	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				 ? GET_MODE (trueop0)
				 : GET_MODE_INNER (mode));
	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				 ? GET_MODE (trueop1)
				 : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_SCALAR_INT_P (trueop0)
	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_SCALAR_INT_P (trueop1)
		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }

	/* Try to merge two VEC_SELECTs from the same vector into a single one.
	   Restrict the transformation to avoid generating a VEC_SELECT with a
	   mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }
      }
      return 0;

    default:
      break;
    }

  return 0;
}
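/* A worked example (illustrative): selecting both halves of one vector,
   (vec_concat (vec_select x (parallel [0 1]))
	       (vec_select x (parallel [2 3]))),
   is merged by the VEC_CONCAT code above into a single
   (vec_select x (parallel [0 1 2 3])), which the identity check in the
   VEC_SELECT case can then reduce to x itself.  */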
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode)
		   && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = std::make_pair (op0, mode);
      rtx_mode_t pop1 = std::make_pair (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert happens,
	 you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, width);
	    else if (wi::geu_p (wop1, width))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
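/* For example (illustrative): calling this function with code == PLUS,
   mode == SImode and CONST_INT operands 7 and 35 reaches the wi::add
   case above and yields (const_int 42) via immed_wide_int_const.  */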
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
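/* For example (illustrative): for (x + 5) - (x - 3), simplify_plus_minus
   below expands the operand array to { +x, +5, -x, +3 }; the sort groups
   the two REGs together, the pairwise loop cancels them, and the
   constants fold to (const_int 8).  */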
static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      canonicalized |= this_neg || i != n_ops - 2;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;

	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }

  if (!canonicalized)
    return NULL_RTX;

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
      ops[0].neg = 0;
    }

  result = ops[0].op;

  /* Now make the result by performing the requested operations.  */
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
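
/* As an illustration,

     simplify_plus_minus (MINUS, SImode,
			  (plus:SI (reg:SI 100) (const_int 4)),
			  (plus:SI (reg:SI 100) (const_int 1)))

   first flattens its arguments into the operand array
   {+reg 100, +4, -reg 100, -1}; the combination loop then cancels the
   two copies of (reg:SI 100) and folds the constants, so the whole
   expression collapses to (const_int 3).  */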
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
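
/* Note that the accepted shapes are (plus a b), (minus a b) and the
   canonical symbolic-constant form (const (plus (symbol_ref ...)
   (const_int ...))), that is, exactly the forms simplify_plus_minus
   knows how to flatten.  */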
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result. If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
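
/* For example, (eq:SI (reg:SI 100) (reg:SI 100)) is proved true by the
   constant folder (the operands are identical and have no side
   effects), so TEM is const_true_rtx; SImode is neither a scalar float
   nor a vector mode, and const_true_rtx is returned unchanged.  */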
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done in, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
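
/* Examples of the canonicalizations performed above:

     (gtu:SI X (const_int 0))              -> (ne:SI X (const_int 0))
     (eq:SI (xor:SI X Y) (const_int 0))    -> (eq:SI X Y)
     (eq:SI (bswap:SI X) (const_int 256))  -> (eq:SI X (const_int 65536))

   Each rewritten form is produced via simplify_gen_relational, so
   further simplifications still get a chance to apply.  */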
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
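
/* For instance, once the callers below have proved op0 < op1 (that is,
   KNOWN_RESULTS == CMP_LT | CMP_LTU), comparison_result (LE, ...)
   yields const_true_rtx while comparison_result (GEU, ...) yields
   const0_rtx.  */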
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }

  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }

  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
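
/* As a worked example, comparing (const_int 5) with (const_int 7)
   under LT takes the integer branch above: wi::lts_p (5, 7) selects
   CMP_LT, wi::ltu_p adds CMP_LTU, and
   comparison_result (LT, CMP_LT | CMP_LTU) returns const_true_rtx.  */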
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;

	      return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	     with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
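
/* Examples of the ternary simplifications above:

     (if_then_else (const_int 1) A B)   -> A
     (fma (neg X) (neg Y) Z)            -> (fma X Y Z)
     (vec_merge A B (const_int 0))      -> B   (if A has no side effects)

   The vec_merge case follows because a zero selector picks every
   element from the second operand.  */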
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = std::make_pair (el, innermode);
	    unsigned char extend = wi::sign_mask (val);

	    for (i = 0; i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
	      / HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
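
/* As an illustration, on a little-endian target

     simplify_immed_subreg (SImode, (const_int 0x1122334455667788),
			    DImode, 0)

   unpacks the DImode constant into eight bytes, selects the four at
   offset 0 (the least significant ones) and repacks them, yielding
   (const_int 0x55667788).  */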
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grog partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits that the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
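
/* For example, on a little-endian target the nested-SUBREG rule above
   rewrites

     (subreg:QI (subreg:SI (reg:DI 100) 0) 0)

   into the single (subreg:QI (reg:DI 100) 0), after first recursing in
   case the flattened form simplifies further.  */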
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
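
/* Callers are expected to prefer simplify_gen_subreg over calling
   gen_rtx_SUBREG directly; e.g. simplify_gen_subreg (SImode, X, DImode, 0)
   folds immediately when X is a constant (to its low 32 bits at byte
   offset 0 on a little-endian target), and only falls back to
   allocating a fresh SUBREG when nothing simplifies.  */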
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}