/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "double-int.h"
#include "fold-const.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "statistics.h"
#include "fixed-value.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
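/* A hedged usage sketch (guarded out of the build): when the sign bit
   of LOW is set the extension word is all ones, otherwise zero.  */
#if 0
static void
hwi_sign_extend_example (void)
{
  unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) -5;
  gcc_assert (HWI_SIGN_EXTEND (low) == (HOST_WIDE_INT) -1);
  gcc_assert (HWI_SIGN_EXTEND ((unsigned HOST_WIDE_INT) 5) == 0);
}
#endif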
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
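/* Hedged sketch (not built): negating the most negative SImode value
   wraps back to itself instead of overflowing, because the negation is
   done in unsigned arithmetic and gen_int_mode truncates.  */
#if 0
static void
neg_const_int_example (void)
{
  rtx min_si = gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode);
  gcc_assert (INTVAL (neg_const_int (SImode, min_si)) == INTVAL (min_si));
}
#endif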
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
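/* Hedged sketch (not built): for SImode the sign-bit constant is
   1 << 31, which mode_signbit_p accepts; other values are rejected.  */
#if 0
static void
mode_signbit_p_example (void)
{
  rtx signbit = gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode);
  gcc_assert (mode_signbit_p (SImode, signbit));
  gcc_assert (!mode_signbit_p (SImode, const1_rtx));
}
#endif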
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
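/* Hedged sketch (not built) of the val_signbit_* trio on 8-bit QImode
   values: 0x80 is exactly the sign bit, 0xff has it set, 0x7f clear.  */
#if 0
static void
val_signbit_example (void)
{
  gcc_assert (val_signbit_p (QImode, 0x80));
  gcc_assert (val_signbit_known_set_p (QImode, 0xff));
  gcc_assert (val_signbit_known_clear_p (QImode, 0x7f));
}
#endif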
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
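/* Hedged usage sketch (not built): two constants fold outright, and
   for commutative codes a constant operand ends up second.  */
#if 0
static void
simplify_gen_binary_example (void)
{
  rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
  gcc_assert (CONST_INT_P (sum) && INTVAL (sum) == 5);
}
#endif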
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
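/* Hedged usage sketch (not built): rewriting (plus:SI r100 r100) with
   r100 := 7 lets the simplifier fold the result to 14.  Register
   number 100 is arbitrary.  */
#if 0
static void
simplify_replace_rtx_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx expr = gen_rtx_PLUS (SImode, reg, reg);
  rtx folded = simplify_replace_rtx (expr, reg, GEN_INT (7));
  gcc_assert (CONST_INT_P (folded) && INTVAL (folded) == 14);
}
#endif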
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
#ifdef WORD_REGISTER_OPERATIONS
      && precision >= BITS_PER_WORD
#endif
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
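/* Hedged sketch (not built): truncating a zero extension back to the
   original mode collapses to the inner operand, the MODE == ORIGMODE
   case at the top of simplify_truncation.  Register number 100 is
   arbitrary.  */
#if 0
static void
simplify_truncation_example (void)
{
  rtx x = gen_rtx_REG (QImode, 100);
  rtx ext = gen_rtx_ZERO_EXTEND (SImode, x);
  gcc_assert (simplify_truncation (QImode, ext, SImode) == x);
}
#endif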
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.
	 */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_PRECISION (mode)
		      > GET_MODE_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
	  && GET_MODE_PRECISION (GET_MODE (op))
	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	     <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (mode)
	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (mode)
	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
				     GET_MODE (SUBREG_REG (op)));
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
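/* Hedged sketch (not built): (not (not x)) collapses back to x via the
   NOT case above.  Register number 100 is arbitrary.  */
#if 0
static void
simplify_unary_example (void)
{
  rtx x = gen_rtx_REG (SImode, 100);
  rtx notx = gen_rtx_NOT (SImode, x);
  gcc_assert (simplify_unary_operation (NOT, SImode, notx, SImode) == x);
}
#endif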
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
				rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something and so if you added this to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
	    int_value = GET_MODE_PRECISION (mode);
	  result = wi::shwi (int_value, mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
	    int_value = GET_MODE_PRECISION (mode);
	  result = wi::shwi (int_value, mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	default:
	  return 0;
	}

      return immed_wide_int_const (result, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d;

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (REAL_VALUES_LESS (t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (REAL_VALUES_LESS (x, t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (&x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (REAL_VALUES_LESS (t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (&x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }

  return 0;
}
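/* Hedged sketch (not built): constant folding a unary NEG on a
   CONST_INT goes through the wi::neg case above.  */
#if 0
static void
const_unary_example (void)
{
  rtx r = simplify_const_unary_operation (NEG, SImode, GEN_INT (5), SImode);
  gcc_assert (r && INTVAL (r) == -5);
}
#endif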
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
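/* Hedged sketch (not built): (xor (bswap x) (bswap y)) becomes
   (bswap (xor x y)), the second pattern above.  Register numbers are
   arbitrary.  */
#if 0
static void
byte_swapping_example (void)
{
  rtx x = gen_rtx_REG (SImode, 100), y = gen_rtx_REG (SImode, 101);
  rtx tem = simplify_byte_swapping_operation (XOR, SImode,
					      gen_rtx_BSWAP (SImode, x),
					      gen_rtx_BSWAP (SImode, y));
  gcc_assert (tem && GET_CODE (tem) == BSWAP);
}
#endif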
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
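/* Hedged sketch (not built): reassociation folds the two constants in
   (plus (plus x 1) 2) into (plus x 3).  Register number 100 is
   arbitrary.  */
#if 0
static void
associative_example (void)
{
  rtx x = gen_rtx_REG (SImode, 100);
  rtx inner = gen_rtx_PLUS (SImode, x, const1_rtx);
  rtx tem = simplify_associative_operation (PLUS, SImode, inner, GEN_INT (2));
  gcc_assert (tem && GET_CODE (tem) == PLUS
	      && CONST_INT_P (XEXP (tem, 1))
	      && INTVAL (XEXP (tem, 1)) == 3);
}
#endif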
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
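/* Hedged usage sketch (not built): the constant is commuted into the
   second slot, and a zero addend then lets the PLUS fold away entirely
   in simplify_binary_operation_1.  Register number 100 is arbitrary.  */
#if 0
static void
binary_operation_example (void)
{
  rtx x = gen_rtx_REG (SImode, 100);
  rtx tem = simplify_binary_operation (PLUS, SImode, GEN_INT (0), x);
  gcc_assert (tem == x);
}
#endif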
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = std::make_pair (XEXP (rhs, 1), mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2112 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2113 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2118 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2119 if (CONST_SCALAR_INT_P (op1
)
2120 && GET_CODE (op0
) == XOR
2121 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2122 && mode_signbit_p (mode
, op1
))
2123 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2124 simplify_gen_binary (XOR
, mode
, op1
,
2127 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2128 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2129 && GET_CODE (op0
) == MULT
2130 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2134 in1
= XEXP (XEXP (op0
, 0), 0);
2135 in2
= XEXP (op0
, 1);
2136 return simplify_gen_binary (MINUS
, mode
, op1
,
2137 simplify_gen_binary (MULT
, mode
,
2141 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2142 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2144 if (COMPARISON_P (op0
)
2145 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2146 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2147 && (reversed
= reversed_comparison (op0
, mode
)))
2149 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2151 /* If one of the operands is a PLUS or a MINUS, see if we can
2152 simplify this by the associative law.
2153 Don't use the associative law for floating point.
2154 The inaccuracy makes it nonassociative,
2155 and subtle programs can break if operations are associated. */
2157 if (INTEGRAL_MODE_P (mode
)
2158 && (plus_minus_operand_p (op0
)
2159 || plus_minus_operand_p (op1
))
2160 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2163 /* Reassociate floating point addition only when the user
2164 specifies associative math operations. */
2165 if (FLOAT_MODE_P (mode
)
2166 && flag_associative_math
)
2168 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2175 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2176 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2177 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2178 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2180 rtx xop00
= XEXP (op0
, 0);
2181 rtx xop10
= XEXP (op1
, 0);
2184 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2186 if (REG_P (xop00
) && REG_P (xop10
)
2187 && GET_MODE (xop00
) == GET_MODE (xop10
)
2188 && REGNO (xop00
) == REGNO (xop10
)
2189 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2190 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
2197 /* We can't assume x-x is 0 even with non-IEEE floating point,
2198 but since it is zero except in very strange circumstances, we
2199 will treat it as zero with -ffinite-math-only. */
2200 if (rtx_equal_p (trueop0
, trueop1
)
2201 && ! side_effects_p (op0
)
2202 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2203 return CONST0_RTX (mode
);
2205 /* Change subtraction from zero into negation. (0 - x) is the
2206 same as -x when x is NaN, infinite, or finite and nonzero.
2207 But if the mode has signed zeros, and does not round towards
2208 -infinity, then 0 - 0 is 0, not -0. */
2209 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2210 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2212 /* (-1 - a) is ~a. */
2213 if (trueop0
== constm1_rtx
)
2214 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2216 /* Subtracting 0 has no effect unless the mode has signed zeros
2217 and supports rounding towards -infinity. In such a case,
2219 if (!(HONOR_SIGNED_ZEROS (mode
)
2220 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2221 && trueop1
== CONST0_RTX (mode
))
2224 /* See if this is something like X * C - X or vice versa or
2225 if the multiplication is written as a shift. If so, we can
2226 distribute and make a new multiply, shift, or maybe just
2227 have X (if C is 2 in the example above). But don't make
2228 something more expensive than we had before. */
2230 if (SCALAR_INT_MODE_P (mode
))
2232 rtx lhs
= op0
, rhs
= op1
;
2234 wide_int coeff0
= wi::one (GET_MODE_PRECISION (mode
));
2235 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (mode
));
2237 if (GET_CODE (lhs
) == NEG
)
2239 coeff0
= wi::minus_one (GET_MODE_PRECISION (mode
));
2240 lhs
= XEXP (lhs
, 0);
2242 else if (GET_CODE (lhs
) == MULT
2243 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2245 coeff0
= std::make_pair (XEXP (lhs
, 1), mode
);
2246 lhs
= XEXP (lhs
, 0);
2248 else if (GET_CODE (lhs
) == ASHIFT
2249 && CONST_INT_P (XEXP (lhs
, 1))
2250 && INTVAL (XEXP (lhs
, 1)) >= 0
2251 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (mode
))
2253 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2254 GET_MODE_PRECISION (mode
));
2255 lhs
= XEXP (lhs
, 0);
2258 if (GET_CODE (rhs
) == NEG
)
2260 negcoeff1
= wi::one (GET_MODE_PRECISION (mode
));
2261 rhs
= XEXP (rhs
, 0);
2263 else if (GET_CODE (rhs
) == MULT
2264 && CONST_INT_P (XEXP (rhs
, 1)))
2266 negcoeff1
= wi::neg (std::make_pair (XEXP (rhs
, 1), mode
));
2267 rhs
= XEXP (rhs
, 0);
2269 else if (GET_CODE (rhs
) == ASHIFT
2270 && CONST_INT_P (XEXP (rhs
, 1))
2271 && INTVAL (XEXP (rhs
, 1)) >= 0
2272 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (mode
))
2274 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2275 GET_MODE_PRECISION (mode
));
2276 negcoeff1
= -negcoeff1
;
2277 rhs
= XEXP (rhs
, 0);
2280 if (rtx_equal_p (lhs
, rhs
))
2282 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2284 bool speed
= optimize_function_for_speed_p (cfun
);
2286 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, mode
);
2288 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2289 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2294 /* (a - (-b)) -> (a + b). True even for IEEE. */
2295 if (GET_CODE (op1
) == NEG
)
2296 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2298 /* (-x - c) may be simplified as (-c - x). */
2299 if (GET_CODE (op0
) == NEG
2300 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2302 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2304 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2307 /* Don't let a relocatable value get a negative coeff. */
2308 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2309 return simplify_gen_binary (PLUS
, mode
,
2311 neg_const_int (mode
, op1
));
2313 /* (x - (x & y)) -> (x & ~y) */
2314 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2316 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2318 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2319 GET_MODE (XEXP (op1
, 1)));
2320 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2322 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2324 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2325 GET_MODE (XEXP (op1
, 0)));
2326 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2330 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2331 by reversing the comparison code if valid. */
2332 if (STORE_FLAG_VALUE
== 1
2333 && trueop0
== const1_rtx
2334 && COMPARISON_P (op1
)
2335 && (reversed
= reversed_comparison (op1
, mode
)))
2338 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2339 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2340 && GET_CODE (op1
) == MULT
2341 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2345 in1
= XEXP (XEXP (op1
, 0), 0);
2346 in2
= XEXP (op1
, 1);
2347 return simplify_gen_binary (PLUS
, mode
,
2348 simplify_gen_binary (MULT
, mode
,
2353 /* Canonicalize (minus (neg A) (mult B C)) to
2354 (minus (mult (neg B) C) A). */
2355 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2356 && GET_CODE (op1
) == MULT
2357 && GET_CODE (op0
) == NEG
)
2361 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2362 in2
= XEXP (op1
, 1);
2363 return simplify_gen_binary (MINUS
, mode
,
2364 simplify_gen_binary (MULT
, mode
,
2369 /* If one of the operands is a PLUS or a MINUS, see if we can
2370 simplify this by the associative law. This will, for example,
2371 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2372 Don't use the associative law for floating point.
2373 The inaccuracy makes it nonassociative,
2374 and subtle programs can break if operations are associated. */
2376 if (INTEGRAL_MODE_P (mode
)
2377 && (plus_minus_operand_p (op0
)
2378 || plus_minus_operand_p (op1
))
2379 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2384 if (trueop1
== constm1_rtx
)
2385 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2387 if (GET_CODE (op0
) == NEG
)
2389 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2390 /* If op1 is a MULT as well and simplify_unary_operation
2391 just moved the NEG to the second operand, simplify_gen_binary
2392 below could through simplify_associative_operation move
2393 the NEG around again and recurse endlessly. */
2395 && GET_CODE (op1
) == MULT
2396 && GET_CODE (temp
) == MULT
2397 && XEXP (op1
, 0) == XEXP (temp
, 0)
2398 && GET_CODE (XEXP (temp
, 1)) == NEG
2399 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2402 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2404 if (GET_CODE (op1
) == NEG
)
2406 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2407 /* If op0 is a MULT as well and simplify_unary_operation
2408 just moved the NEG to the second operand, simplify_gen_binary
2409 below could through simplify_associative_operation move
2410 the NEG around again and recurse endlessly. */
2412 && GET_CODE (op0
) == MULT
2413 && GET_CODE (temp
) == MULT
2414 && XEXP (op0
, 0) == XEXP (temp
, 0)
2415 && GET_CODE (XEXP (temp
, 1)) == NEG
2416 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2419 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2422 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2423 x is NaN, since x * 0 is then also NaN. Nor is it valid
2424 when the mode has signed zeros, since multiplying a negative
2425 number by 0 will give -0, not 0. */
2426 if (!HONOR_NANS (mode
)
2427 && !HONOR_SIGNED_ZEROS (mode
)
2428 && trueop1
== CONST0_RTX (mode
)
2429 && ! side_effects_p (op0
))
2432 /* In IEEE floating point, x*1 is not equivalent to x for
2434 if (!HONOR_SNANS (mode
)
2435 && trueop1
== CONST1_RTX (mode
))
2438 /* Convert multiply by constant power of two into shift. */
2439 if (CONST_SCALAR_INT_P (trueop1
))
2441 val
= wi::exact_log2 (std::make_pair (trueop1
, mode
));
2443 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2446 /* x*2 is x+x and x*(-1) is -x */
2447 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2448 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2449 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2450 && GET_MODE (op0
) == mode
)
2453 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2455 if (REAL_VALUES_EQUAL (d
, dconst2
))
2456 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2458 if (!HONOR_SNANS (mode
)
2459 && REAL_VALUES_EQUAL (d
, dconstm1
))
2460 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2463 /* Optimize -x * -x as x * x. */
2464 if (FLOAT_MODE_P (mode
)
2465 && GET_CODE (op0
) == NEG
2466 && GET_CODE (op1
) == NEG
2467 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2468 && !side_effects_p (XEXP (op0
, 0)))
2469 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2471 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2472 if (SCALAR_FLOAT_MODE_P (mode
)
2473 && GET_CODE (op0
) == ABS
2474 && GET_CODE (op1
) == ABS
2475 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2476 && !side_effects_p (XEXP (op0
, 0)))
2477 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2479 /* Reassociate multiplication, but for floating point MULTs
2480 only when the user specifies unsafe math optimizations. */
2481 if (! FLOAT_MODE_P (mode
)
2482 || flag_unsafe_math_optimizations
)
2484 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2491 if (trueop1
== CONST0_RTX (mode
))
2493 if (INTEGRAL_MODE_P (mode
)
2494 && trueop1
== CONSTM1_RTX (mode
)
2495 && !side_effects_p (op0
))
2497 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2499 /* A | (~A) -> -1 */
2500 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2501 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2502 && ! side_effects_p (op0
)
2503 && SCALAR_INT_MODE_P (mode
))
2506 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2507 if (CONST_INT_P (op1
)
2508 && HWI_COMPUTABLE_MODE_P (mode
)
2509 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2510 && !side_effects_p (op0
))
2513 /* Canonicalize (X & C1) | C2. */
2514 if (GET_CODE (op0
) == AND
2515 && CONST_INT_P (trueop1
)
2516 && CONST_INT_P (XEXP (op0
, 1)))
2518 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2519 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2520 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2522 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2524 && !side_effects_p (XEXP (op0
, 0)))
2527 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2528 if (((c1
|c2
) & mask
) == mask
)
2529 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2531 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2532 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2534 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2535 gen_int_mode (c1
& ~c2
, mode
));
2536 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2540 /* Convert (A & B) | A to A. */
2541 if (GET_CODE (op0
) == AND
2542 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2543 || rtx_equal_p (XEXP (op0
, 1), op1
))
2544 && ! side_effects_p (XEXP (op0
, 0))
2545 && ! side_effects_p (XEXP (op0
, 1)))
2548 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2549 mode size to (rotate A CX). */
2551 if (GET_CODE (op1
) == ASHIFT
2552 || GET_CODE (op1
) == SUBREG
)
2563 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2564 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2565 && CONST_INT_P (XEXP (opleft
, 1))
2566 && CONST_INT_P (XEXP (opright
, 1))
2567 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2568 == GET_MODE_PRECISION (mode
)))
2569 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2571 /* Same, but for ashift that has been "simplified" to a wider mode
2572 by simplify_shift_const. */
2574 if (GET_CODE (opleft
) == SUBREG
2575 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2576 && GET_CODE (opright
) == LSHIFTRT
2577 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2578 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2579 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2580 && (GET_MODE_SIZE (GET_MODE (opleft
))
2581 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2582 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2583 SUBREG_REG (XEXP (opright
, 0)))
2584 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2585 && CONST_INT_P (XEXP (opright
, 1))
2586 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2587 == GET_MODE_PRECISION (mode
)))
2588 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2589 XEXP (SUBREG_REG (opleft
), 1));
2591 /* If we have (ior (and (X C1) C2)), simplify this by making
2592 C1 as small as possible if C1 actually changes. */
2593 if (CONST_INT_P (op1
)
2594 && (HWI_COMPUTABLE_MODE_P (mode
)
2595 || INTVAL (op1
) > 0)
2596 && GET_CODE (op0
) == AND
2597 && CONST_INT_P (XEXP (op0
, 1))
2598 && CONST_INT_P (op1
)
2599 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2601 rtx tmp
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2602 gen_int_mode (UINTVAL (XEXP (op0
, 1))
2605 return simplify_gen_binary (IOR
, mode
, tmp
, op1
);
2608 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2609 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2610 the PLUS does not affect any of the bits in OP1: then we can do
2611 the IOR as a PLUS and we can associate. This is valid if OP1
2612 can be safely shifted left C bits. */
2613 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2614 && GET_CODE (XEXP (op0
, 0)) == PLUS
2615 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2616 && CONST_INT_P (XEXP (op0
, 1))
2617 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2619 int count
= INTVAL (XEXP (op0
, 1));
2620 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2622 if (mask
>> count
== INTVAL (trueop1
)
2623 && trunc_int_for_mode (mask
, mode
) == mask
2624 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2625 return simplify_gen_binary (ASHIFTRT
, mode
,
2626 plus_constant (mode
, XEXP (op0
, 0),
2631 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2635 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2641 if (trueop1
== CONST0_RTX (mode
))
2643 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2644 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2645 if (rtx_equal_p (trueop0
, trueop1
)
2646 && ! side_effects_p (op0
)
2647 && GET_MODE_CLASS (mode
) != MODE_CC
)
2648 return CONST0_RTX (mode
);
2650 /* Canonicalize XOR of the most significant bit to PLUS. */
2651 if (CONST_SCALAR_INT_P (op1
)
2652 && mode_signbit_p (mode
, op1
))
2653 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2654 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2655 if (CONST_SCALAR_INT_P (op1
)
2656 && GET_CODE (op0
) == PLUS
2657 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2658 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2659 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2660 simplify_gen_binary (XOR
, mode
, op1
,
2663 /* If we are XORing two things that have no bits in common,
2664 convert them into an IOR. This helps to detect rotation encoded
2665 using those methods and possibly other simplifications. */
2667 if (HWI_COMPUTABLE_MODE_P (mode
)
2668 && (nonzero_bits (op0
, mode
)
2669 & nonzero_bits (op1
, mode
)) == 0)
2670 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2672 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2673 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2676 int num_negated
= 0;
2678 if (GET_CODE (op0
) == NOT
)
2679 num_negated
++, op0
= XEXP (op0
, 0);
2680 if (GET_CODE (op1
) == NOT
)
2681 num_negated
++, op1
= XEXP (op1
, 0);
2683 if (num_negated
== 2)
2684 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2685 else if (num_negated
== 1)
2686 return simplify_gen_unary (NOT
, mode
,
2687 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2691 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2692 correspond to a machine insn or result in further simplifications
2693 if B is a constant. */
2695 if (GET_CODE (op0
) == AND
2696 && rtx_equal_p (XEXP (op0
, 1), op1
)
2697 && ! side_effects_p (op1
))
2698 return simplify_gen_binary (AND
, mode
,
2699 simplify_gen_unary (NOT
, mode
,
2700 XEXP (op0
, 0), mode
),
2703 else if (GET_CODE (op0
) == AND
2704 && rtx_equal_p (XEXP (op0
, 0), op1
)
2705 && ! side_effects_p (op1
))
2706 return simplify_gen_binary (AND
, mode
,
2707 simplify_gen_unary (NOT
, mode
,
2708 XEXP (op0
, 1), mode
),
2711 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2712 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2713 out bits inverted twice and not set by C. Similarly, given
2714 (xor (and (xor A B) C) D), simplify without inverting C in
2715 the xor operand: (xor (and A C) (B&C)^D).
2717 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2718 && GET_CODE (XEXP (op0
, 0)) == XOR
2719 && CONST_INT_P (op1
)
2720 && CONST_INT_P (XEXP (op0
, 1))
2721 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2723 enum rtx_code op
= GET_CODE (op0
);
2724 rtx a
= XEXP (XEXP (op0
, 0), 0);
2725 rtx b
= XEXP (XEXP (op0
, 0), 1);
2726 rtx c
= XEXP (op0
, 1);
2728 HOST_WIDE_INT bval
= INTVAL (b
);
2729 HOST_WIDE_INT cval
= INTVAL (c
);
2730 HOST_WIDE_INT dval
= INTVAL (d
);
2731 HOST_WIDE_INT xcval
;
2738 return simplify_gen_binary (XOR
, mode
,
2739 simplify_gen_binary (op
, mode
, a
, c
),
2740 gen_int_mode ((bval
& xcval
) ^ dval
,
2744 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2745 we can transform like this:
2746 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2747 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2748 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2749 Attempt a few simplifications when B and C are both constants. */
2750 if (GET_CODE (op0
) == AND
2751 && CONST_INT_P (op1
)
2752 && CONST_INT_P (XEXP (op0
, 1)))
2754 rtx a
= XEXP (op0
, 0);
2755 rtx b
= XEXP (op0
, 1);
2757 HOST_WIDE_INT bval
= INTVAL (b
);
2758 HOST_WIDE_INT cval
= INTVAL (c
);
2760 /* Instead of computing ~A&C, we compute its negated value,
2761 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2762 optimize for sure. If it does not simplify, we still try
2763 to compute ~A&C below, but since that always allocates
2764 RTL, we don't try that before committing to returning a
2765 simplified expression. */
2766 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
2769 if ((~cval
& bval
) == 0)
2771 rtx na_c
= NULL_RTX
;
2773 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
2776 /* If ~A does not simplify, don't bother: we don't
2777 want to simplify 2 operations into 3, and if na_c
2778 were to simplify with na, n_na_c would have
2779 simplified as well. */
2780 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
2782 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
2785 /* Try to simplify ~A&C | ~B&C. */
2786 if (na_c
!= NULL_RTX
)
2787 return simplify_gen_binary (IOR
, mode
, na_c
,
2788 gen_int_mode (~bval
& cval
, mode
));
2792 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2793 if (n_na_c
== CONSTM1_RTX (mode
))
2795 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2796 gen_int_mode (~cval
& bval
,
2798 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2799 gen_int_mode (~bval
& cval
,
2805 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2806 comparison if STORE_FLAG_VALUE is 1. */
2807 if (STORE_FLAG_VALUE
== 1
2808 && trueop1
== const1_rtx
2809 && COMPARISON_P (op0
)
2810 && (reversed
= reversed_comparison (op0
, mode
)))
2813 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2814 is (lt foo (const_int 0)), so we can perform the above
2815 simplification if STORE_FLAG_VALUE is 1. */
2817 if (STORE_FLAG_VALUE
== 1
2818 && trueop1
== const1_rtx
2819 && GET_CODE (op0
) == LSHIFTRT
2820 && CONST_INT_P (XEXP (op0
, 1))
2821 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2822 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2824 /* (xor (comparison foo bar) (const_int sign-bit))
2825 when STORE_FLAG_VALUE is the sign bit. */
2826 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2827 && trueop1
== const_true_rtx
2828 && COMPARISON_P (op0
)
2829 && (reversed
= reversed_comparison (op0
, mode
)))
2832 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2836 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2842 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2844 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2846 if (HWI_COMPUTABLE_MODE_P (mode
))
2848 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2849 HOST_WIDE_INT nzop1
;
2850 if (CONST_INT_P (trueop1
))
2852 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2853 /* If we are turning off bits already known off in OP0, we need
2855 if ((nzop0
& ~val1
) == 0)
2858 nzop1
= nonzero_bits (trueop1
, mode
);
2859 /* If we are clearing all the nonzero bits, the result is zero. */
2860 if ((nzop1
& nzop0
) == 0
2861 && !side_effects_p (op0
) && !side_effects_p (op1
))
2862 return CONST0_RTX (mode
);
2864 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2865 && GET_MODE_CLASS (mode
) != MODE_CC
)
2868 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2869 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2870 && ! side_effects_p (op0
)
2871 && GET_MODE_CLASS (mode
) != MODE_CC
)
2872 return CONST0_RTX (mode
);
2874 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2875 there are no nonzero bits of C outside of X's mode. */
2876 if ((GET_CODE (op0
) == SIGN_EXTEND
2877 || GET_CODE (op0
) == ZERO_EXTEND
)
2878 && CONST_INT_P (trueop1
)
2879 && HWI_COMPUTABLE_MODE_P (mode
)
2880 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2881 & UINTVAL (trueop1
)) == 0)
2883 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2884 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2885 gen_int_mode (INTVAL (trueop1
),
2887 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2890 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2891 we might be able to further simplify the AND with X and potentially
2892 remove the truncation altogether. */
2893 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2895 rtx x
= XEXP (op0
, 0);
2896 machine_mode xmode
= GET_MODE (x
);
2897 tem
= simplify_gen_binary (AND
, xmode
, x
,
2898 gen_int_mode (INTVAL (trueop1
), xmode
));
2899 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2902 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2903 if (GET_CODE (op0
) == IOR
2904 && CONST_INT_P (trueop1
)
2905 && CONST_INT_P (XEXP (op0
, 1)))
2907 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2908 return simplify_gen_binary (IOR
, mode
,
2909 simplify_gen_binary (AND
, mode
,
2910 XEXP (op0
, 0), op1
),
2911 gen_int_mode (tmp
, mode
));
2914 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2915 insn (and may simplify more). */
2916 if (GET_CODE (op0
) == XOR
2917 && rtx_equal_p (XEXP (op0
, 0), op1
)
2918 && ! side_effects_p (op1
))
2919 return simplify_gen_binary (AND
, mode
,
2920 simplify_gen_unary (NOT
, mode
,
2921 XEXP (op0
, 1), mode
),
2924 if (GET_CODE (op0
) == XOR
2925 && rtx_equal_p (XEXP (op0
, 1), op1
)
2926 && ! side_effects_p (op1
))
2927 return simplify_gen_binary (AND
, mode
,
2928 simplify_gen_unary (NOT
, mode
,
2929 XEXP (op0
, 0), mode
),
2932 /* Similarly for (~(A ^ B)) & A. */
2933 if (GET_CODE (op0
) == NOT
2934 && GET_CODE (XEXP (op0
, 0)) == XOR
2935 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2936 && ! side_effects_p (op1
))
2937 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2939 if (GET_CODE (op0
) == NOT
2940 && GET_CODE (XEXP (op0
, 0)) == XOR
2941 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2942 && ! side_effects_p (op1
))
2943 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2945 /* Convert (A | B) & A to A. */
2946 if (GET_CODE (op0
) == IOR
2947 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2948 || rtx_equal_p (XEXP (op0
, 1), op1
))
2949 && ! side_effects_p (XEXP (op0
, 0))
2950 && ! side_effects_p (XEXP (op0
, 1)))
2953 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2954 ((A & N) + B) & M -> (A + B) & M
2955 Similarly if (N & M) == 0,
2956 ((A | N) + B) & M -> (A + B) & M
2957 and for - instead of + and/or ^ instead of |.
2958 Also, if (N & M) == 0, then
2959 (A +- N) & M -> A & M. */
2960 if (CONST_INT_P (trueop1
)
2961 && HWI_COMPUTABLE_MODE_P (mode
)
2962 && ~UINTVAL (trueop1
)
2963 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
2964 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2969 pmop
[0] = XEXP (op0
, 0);
2970 pmop
[1] = XEXP (op0
, 1);
2972 if (CONST_INT_P (pmop
[1])
2973 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
2974 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
2976 for (which
= 0; which
< 2; which
++)
2979 switch (GET_CODE (tem
))
2982 if (CONST_INT_P (XEXP (tem
, 1))
2983 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
2984 == UINTVAL (trueop1
))
2985 pmop
[which
] = XEXP (tem
, 0);
2989 if (CONST_INT_P (XEXP (tem
, 1))
2990 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
2991 pmop
[which
] = XEXP (tem
, 0);
2998 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3000 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3002 return simplify_gen_binary (code
, mode
, tem
, op1
);
3006 /* (and X (ior (not X) Y) -> (and X Y) */
3007 if (GET_CODE (op1
) == IOR
3008 && GET_CODE (XEXP (op1
, 0)) == NOT
3009 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3010 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3012 /* (and (ior (not X) Y) X) -> (and X Y) */
3013 if (GET_CODE (op0
) == IOR
3014 && GET_CODE (XEXP (op0
, 0)) == NOT
3015 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3016 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3018 /* (and X (ior Y (not X)) -> (and X Y) */
3019 if (GET_CODE (op1
) == IOR
3020 && GET_CODE (XEXP (op1
, 1)) == NOT
3021 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3022 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3024 /* (and (ior Y (not X)) X) -> (and X Y) */
3025 if (GET_CODE (op0
) == IOR
3026 && GET_CODE (XEXP (op0
, 1)) == NOT
3027 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3028 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3030 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3034 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3040 /* 0/x is 0 (or x&0 if x has side-effects). */
3041 if (trueop0
== CONST0_RTX (mode
))
3043 if (side_effects_p (op1
))
3044 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3048 if (trueop1
== CONST1_RTX (mode
))
3050 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3054 /* Convert divide by power of two into shift. */
3055 if (CONST_INT_P (trueop1
)
3056 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3057 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
3061 /* Handle floating point and integers separately. */
3062 if (SCALAR_FLOAT_MODE_P (mode
))
3064 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3065 safe for modes with NaNs, since 0.0 / 0.0 will then be
3066 NaN rather than 0.0. Nor is it safe for modes with signed
3067 zeros, since dividing 0 by a negative number gives -0.0 */
3068 if (trueop0
== CONST0_RTX (mode
)
3069 && !HONOR_NANS (mode
)
3070 && !HONOR_SIGNED_ZEROS (mode
)
3071 && ! side_effects_p (op1
))
3074 if (trueop1
== CONST1_RTX (mode
)
3075 && !HONOR_SNANS (mode
))
3078 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3079 && trueop1
!= CONST0_RTX (mode
))
3082 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
3085 if (REAL_VALUES_EQUAL (d
, dconstm1
)
3086 && !HONOR_SNANS (mode
))
3087 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3089 /* Change FP division by a constant into multiplication.
3090 Only do this with -freciprocal-math. */
3091 if (flag_reciprocal_math
3092 && !REAL_VALUES_EQUAL (d
, dconst0
))
3094 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
3095 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
3096 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3100 else if (SCALAR_INT_MODE_P (mode
))
3102 /* 0/x is 0 (or x&0 if x has side-effects). */
3103 if (trueop0
== CONST0_RTX (mode
)
3104 && !cfun
->can_throw_non_call_exceptions
)
3106 if (side_effects_p (op1
))
3107 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3111 if (trueop1
== CONST1_RTX (mode
))
3113 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3118 if (trueop1
== constm1_rtx
)
3120 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3122 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3128 /* 0%x is 0 (or x&0 if x has side-effects). */
3129 if (trueop0
== CONST0_RTX (mode
))
3131 if (side_effects_p (op1
))
3132 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3135 /* x%1 is 0 (of x&0 if x has side-effects). */
3136 if (trueop1
== CONST1_RTX (mode
))
3138 if (side_effects_p (op0
))
3139 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3140 return CONST0_RTX (mode
);
3142 /* Implement modulus by power of two as AND. */
3143 if (CONST_INT_P (trueop1
)
3144 && exact_log2 (UINTVAL (trueop1
)) > 0)
3145 return simplify_gen_binary (AND
, mode
, op0
,
3146 gen_int_mode (INTVAL (op1
) - 1, mode
));
3150 /* 0%x is 0 (or x&0 if x has side-effects). */
3151 if (trueop0
== CONST0_RTX (mode
))
3153 if (side_effects_p (op1
))
3154 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3157 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3158 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3160 if (side_effects_p (op0
))
3161 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3162 return CONST0_RTX (mode
);
3168 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3169 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3170 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3172 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3173 if (CONST_INT_P (trueop1
)
3174 && IN_RANGE (INTVAL (trueop1
),
3175 GET_MODE_PRECISION (mode
) / 2 + (code
== ROTATE
),
3176 GET_MODE_PRECISION (mode
) - 1))
3177 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3178 mode
, op0
, GEN_INT (GET_MODE_PRECISION (mode
)
3179 - INTVAL (trueop1
)));
3183 if (trueop1
== CONST0_RTX (mode
))
3185 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3187 /* Rotating ~0 always results in ~0. */
3188 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3189 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3190 && ! side_effects_p (op1
))
3194 scalar constants c1, c2
3195 size (M2) > size (M1)
3196 c1 == size (M2) - size (M1)
3198 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3202 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3204 if (code
== ASHIFTRT
3205 && !VECTOR_MODE_P (mode
)
3207 && CONST_INT_P (op1
)
3208 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3209 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0
)))
3210 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3211 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0
)))
3212 > GET_MODE_BITSIZE (mode
))
3213 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3214 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0
)))
3215 - GET_MODE_BITSIZE (mode
)))
3216 && subreg_lowpart_p (op0
))
3218 rtx tmp
= GEN_INT (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3220 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op0
));
3221 tmp
= simplify_gen_binary (ASHIFTRT
,
3222 GET_MODE (SUBREG_REG (op0
)),
3223 XEXP (SUBREG_REG (op0
), 0),
3225 return simplify_gen_subreg (mode
, tmp
, inner_mode
,
3226 subreg_lowpart_offset (mode
,
3230 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3232 val
= INTVAL (op1
) & (GET_MODE_PRECISION (mode
) - 1);
3233 if (val
!= INTVAL (op1
))
3234 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3241 if (trueop1
== CONST0_RTX (mode
))
3243 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3245 goto canonicalize_shift
;
3248 if (trueop1
== CONST0_RTX (mode
))
3250 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3252 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3253 if (GET_CODE (op0
) == CLZ
3254 && CONST_INT_P (trueop1
)
3255 && STORE_FLAG_VALUE
== 1
3256 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3258 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3259 unsigned HOST_WIDE_INT zero_val
= 0;
3261 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3262 && zero_val
== GET_MODE_PRECISION (imode
)
3263 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3264 return simplify_gen_relational (EQ
, mode
, imode
,
3265 XEXP (op0
, 0), const0_rtx
);
3267 goto canonicalize_shift
;
3270 if (width
<= HOST_BITS_PER_WIDE_INT
3271 && mode_signbit_p (mode
, trueop1
)
3272 && ! side_effects_p (op0
))
3274 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3276 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3282 if (width
<= HOST_BITS_PER_WIDE_INT
3283 && CONST_INT_P (trueop1
)
3284 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3285 && ! side_effects_p (op0
))
3287 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3289 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3295 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3297 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3299 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3305 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3307 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3309 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3322 /* ??? There are simplifications that can be done. */
3326 if (!VECTOR_MODE_P (mode
))
3328 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3329 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3330 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3331 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3332 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3334 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3335 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3338 /* Extract a scalar element from a nested VEC_SELECT expression
3339 (with optional nested VEC_CONCAT expression). Some targets
3340 (i386) extract scalar element from a vector using chain of
3341 nested VEC_SELECT expressions. When input operand is a memory
3342 operand, this operation can be simplified to a simple scalar
3343 load from an offseted memory address. */
3344 if (GET_CODE (trueop0
) == VEC_SELECT
)
3346 rtx op0
= XEXP (trueop0
, 0);
3347 rtx op1
= XEXP (trueop0
, 1);
3349 machine_mode opmode
= GET_MODE (op0
);
3350 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3351 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3353 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3359 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3360 gcc_assert (i
< n_elts
);
3362 /* Select element, pointed by nested selector. */
3363 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3365 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3366 if (GET_CODE (op0
) == VEC_CONCAT
)
3368 rtx op00
= XEXP (op0
, 0);
3369 rtx op01
= XEXP (op0
, 1);
3371 machine_mode mode00
, mode01
;
3372 int n_elts00
, n_elts01
;
3374 mode00
= GET_MODE (op00
);
3375 mode01
= GET_MODE (op01
);
3377 /* Find out number of elements of each operand. */
3378 if (VECTOR_MODE_P (mode00
))
3380 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3381 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3386 if (VECTOR_MODE_P (mode01
))
3388 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3389 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3394 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3396 /* Select correct operand of VEC_CONCAT
3397 and adjust selector. */
3398 if (elem
< n_elts01
)
3409 vec
= rtvec_alloc (1);
3410 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3412 tmp
= gen_rtx_fmt_ee (code
, mode
,
3413 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3416 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3417 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3418 return XEXP (trueop0
, 0);
3422 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3423 gcc_assert (GET_MODE_INNER (mode
)
3424 == GET_MODE_INNER (GET_MODE (trueop0
)));
3425 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3427 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3429 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3430 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3431 rtvec v
= rtvec_alloc (n_elts
);
3434 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3435 for (i
= 0; i
< n_elts
; i
++)
3437 rtx x
= XVECEXP (trueop1
, 0, i
);
3439 gcc_assert (CONST_INT_P (x
));
3440 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3444 return gen_rtx_CONST_VECTOR (mode
, v
);
3447 /* Recognize the identity. */
3448 if (GET_MODE (trueop0
) == mode
)
3450 bool maybe_ident
= true;
3451 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3453 rtx j
= XVECEXP (trueop1
, 0, i
);
3454 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3456 maybe_ident
= false;
3464 /* If we build {a,b} then permute it, build the result directly. */
3465 if (XVECLEN (trueop1
, 0) == 2
3466 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3467 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3468 && GET_CODE (trueop0
) == VEC_CONCAT
3469 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3470 && GET_MODE (XEXP (trueop0
, 0)) == mode
3471 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3472 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3474 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3475 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3478 gcc_assert (i0
< 4 && i1
< 4);
3479 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3480 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3482 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3485 if (XVECLEN (trueop1
, 0) == 2
3486 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3487 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3488 && GET_CODE (trueop0
) == VEC_CONCAT
3489 && GET_MODE (trueop0
) == mode
)
3491 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3492 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3495 gcc_assert (i0
< 2 && i1
< 2);
3496 subop0
= XEXP (trueop0
, i0
);
3497 subop1
= XEXP (trueop0
, i1
);
3499 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3502 /* If we select one half of a vec_concat, return that. */
3503 if (GET_CODE (trueop0
) == VEC_CONCAT
3504 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3506 rtx subop0
= XEXP (trueop0
, 0);
3507 rtx subop1
= XEXP (trueop0
, 1);
3508 machine_mode mode0
= GET_MODE (subop0
);
3509 machine_mode mode1
= GET_MODE (subop1
);
3510 int li
= GET_MODE_SIZE (GET_MODE_INNER (mode0
));
3511 int l0
= GET_MODE_SIZE (mode0
) / li
;
3512 int l1
= GET_MODE_SIZE (mode1
) / li
;
3513 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3514 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3516 bool success
= true;
3517 for (int i
= 1; i
< l0
; ++i
)
3519 rtx j
= XVECEXP (trueop1
, 0, i
);
3520 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3529 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3531 bool success
= true;
3532 for (int i
= 1; i
< l1
; ++i
)
3534 rtx j
= XVECEXP (trueop1
, 0, i
);
3535 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3547 if (XVECLEN (trueop1
, 0) == 1
3548 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3549 && GET_CODE (trueop0
) == VEC_CONCAT
)
3552 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3554 /* Try to find the element in the VEC_CONCAT. */
3555 while (GET_MODE (vec
) != mode
3556 && GET_CODE (vec
) == VEC_CONCAT
)
3558 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3559 if (offset
< vec_size
)
3560 vec
= XEXP (vec
, 0);
3564 vec
= XEXP (vec
, 1);
3566 vec
= avoid_constant_pool_reference (vec
);
3569 if (GET_MODE (vec
) == mode
)
3573 /* If we select elements in a vec_merge that all come from the same
3574 operand, select from that operand directly. */
3575 if (GET_CODE (op0
) == VEC_MERGE
)
3577 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3578 if (CONST_INT_P (trueop02
))
3580 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3581 bool all_operand0
= true;
3582 bool all_operand1
= true;
3583 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3585 rtx j
= XVECEXP (trueop1
, 0, i
);
3586 if (sel
& (1 << UINTVAL (j
)))
3587 all_operand1
= false;
3589 all_operand0
= false;
3591 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3592 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3593 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3594 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3598 /* If we have two nested selects that are inverses of each
3599 other, replace them with the source operand. */
3600 if (GET_CODE (trueop0
) == VEC_SELECT
3601 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3603 rtx op0_subop1
= XEXP (trueop0
, 1);
3604 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3605 gcc_assert (XVECLEN (trueop1
, 0) == GET_MODE_NUNITS (mode
));
3607 /* Apply the outer ordering vector to the inner one. (The inner
3608 ordering vector is expressly permitted to be of a different
3609 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3610 then the two VEC_SELECTs cancel. */
3611 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3613 rtx x
= XVECEXP (trueop1
, 0, i
);
3614 if (!CONST_INT_P (x
))
3616 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3617 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3620 return XEXP (trueop0
, 0);
3626 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3627 ? GET_MODE (trueop0
)
3628 : GET_MODE_INNER (mode
));
3629 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3630 ? GET_MODE (trueop1
)
3631 : GET_MODE_INNER (mode
));
3633 gcc_assert (VECTOR_MODE_P (mode
));
3634 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3635 == GET_MODE_SIZE (mode
));
3637 if (VECTOR_MODE_P (op0_mode
))
3638 gcc_assert (GET_MODE_INNER (mode
)
3639 == GET_MODE_INNER (op0_mode
));
3641 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3643 if (VECTOR_MODE_P (op1_mode
))
3644 gcc_assert (GET_MODE_INNER (mode
)
3645 == GET_MODE_INNER (op1_mode
));
3647 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3649 if ((GET_CODE (trueop0
) == CONST_VECTOR
3650 || CONST_SCALAR_INT_P (trueop0
)
3651 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3652 && (GET_CODE (trueop1
) == CONST_VECTOR
3653 || CONST_SCALAR_INT_P (trueop1
)
3654 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3656 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3657 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3658 rtvec v
= rtvec_alloc (n_elts
);
3660 unsigned in_n_elts
= 1;
3662 if (VECTOR_MODE_P (op0_mode
))
3663 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3664 for (i
= 0; i
< n_elts
; i
++)
3668 if (!VECTOR_MODE_P (op0_mode
))
3669 RTVEC_ELT (v
, i
) = trueop0
;
3671 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3675 if (!VECTOR_MODE_P (op1_mode
))
3676 RTVEC_ELT (v
, i
) = trueop1
;
3678 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3683 return gen_rtx_CONST_VECTOR (mode
, v
);
3686 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3687 Restrict the transformation to avoid generating a VEC_SELECT with a
3688 mode unrelated to its operand. */
3689 if (GET_CODE (trueop0
) == VEC_SELECT
3690 && GET_CODE (trueop1
) == VEC_SELECT
3691 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3692 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3694 rtx par0
= XEXP (trueop0
, 1);
3695 rtx par1
= XEXP (trueop1
, 1);
3696 int len0
= XVECLEN (par0
, 0);
3697 int len1
= XVECLEN (par1
, 0);
3698 rtvec vec
= rtvec_alloc (len0
+ len1
);
3699 for (int i
= 0; i
< len0
; i
++)
3700 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3701 for (int i
= 0; i
< len1
; i
++)
3702 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3703 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3704 gen_rtx_PARALLEL (VOIDmode
, vec
));
3717 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
3720 unsigned int width
= GET_MODE_PRECISION (mode
);
3722 if (VECTOR_MODE_P (mode
)
3723 && code
!= VEC_CONCAT
3724 && GET_CODE (op0
) == CONST_VECTOR
3725 && GET_CODE (op1
) == CONST_VECTOR
)
3727 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3728 machine_mode op0mode
= GET_MODE (op0
);
3729 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3730 machine_mode op1mode
= GET_MODE (op1
);
3731 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3732 rtvec v
= rtvec_alloc (n_elts
);
3735 gcc_assert (op0_n_elts
== n_elts
);
3736 gcc_assert (op1_n_elts
== n_elts
);
3737 for (i
= 0; i
< n_elts
; i
++)
3739 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3740 CONST_VECTOR_ELT (op0
, i
),
3741 CONST_VECTOR_ELT (op1
, i
));
3744 RTVEC_ELT (v
, i
) = x
;
3747 return gen_rtx_CONST_VECTOR (mode
, v
);
3750 if (VECTOR_MODE_P (mode
)
3751 && code
== VEC_CONCAT
3752 && (CONST_SCALAR_INT_P (op0
)
3753 || GET_CODE (op0
) == CONST_FIXED
3754 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3755 && (CONST_SCALAR_INT_P (op1
)
3756 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3757 || GET_CODE (op1
) == CONST_FIXED
))
3759 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3760 rtvec v
= rtvec_alloc (n_elts
);
3762 gcc_assert (n_elts
>= 2);
3765 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3766 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3768 RTVEC_ELT (v
, 0) = op0
;
3769 RTVEC_ELT (v
, 1) = op1
;
3773 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3774 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3777 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3778 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3779 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3781 for (i
= 0; i
< op0_n_elts
; ++i
)
3782 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3783 for (i
= 0; i
< op1_n_elts
; ++i
)
3784 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3787 return gen_rtx_CONST_VECTOR (mode
, v
);
3790 if (SCALAR_FLOAT_MODE_P (mode
)
3791 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3792 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3793 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3804 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3806 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3808 for (i
= 0; i
< 4; i
++)
3825 real_from_target (&r
, tmp0
, mode
);
3826 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3830 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3833 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3834 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3835 real_convert (&f0
, mode
, &f0
);
3836 real_convert (&f1
, mode
, &f1
);
3838 if (HONOR_SNANS (mode
)
3839 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3843 && REAL_VALUES_EQUAL (f1
, dconst0
)
3844 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3847 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3848 && flag_trapping_math
3849 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3851 int s0
= REAL_VALUE_NEGATIVE (f0
);
3852 int s1
= REAL_VALUE_NEGATIVE (f1
);
3857 /* Inf + -Inf = NaN plus exception. */
3862 /* Inf - Inf = NaN plus exception. */
3867 /* Inf / Inf = NaN plus exception. */
3874 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3875 && flag_trapping_math
3876 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3877 || (REAL_VALUE_ISINF (f1
)
3878 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3879 /* Inf * 0 = NaN plus exception. */
3882 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3884 real_convert (&result
, mode
, &value
);
3886 /* Don't constant fold this floating point operation if
3887 the result has overflowed and flag_trapping_math. */
3889 if (flag_trapping_math
3890 && MODE_HAS_INFINITIES (mode
)
3891 && REAL_VALUE_ISINF (result
)
3892 && !REAL_VALUE_ISINF (f0
)
3893 && !REAL_VALUE_ISINF (f1
))
3894 /* Overflow plus exception. */
3897 /* Don't constant fold this floating point operation if the
3898 result may dependent upon the run-time rounding mode and
3899 flag_rounding_math is set, or if GCC's software emulation
3900 is unable to accurately represent the result. */
3902 if ((flag_rounding_math
3903 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3904 && (inexact
|| !real_identical (&result
, &value
)))
3907 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3911 /* We can fold some multi-word operations. */
3912 if ((GET_MODE_CLASS (mode
) == MODE_INT
3913 || GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
3914 && CONST_SCALAR_INT_P (op0
)
3915 && CONST_SCALAR_INT_P (op1
))
3919 rtx_mode_t pop0
= std::make_pair (op0
, mode
);
3920 rtx_mode_t pop1
= std::make_pair (op1
, mode
);
3922 #if TARGET_SUPPORTS_WIDE_INT == 0
3923 /* This assert keeps the simplification from producing a result
3924 that cannot be represented in a CONST_DOUBLE but a lot of
3925 upstream callers expect that this function never fails to
3926 simplify something and so you if you added this to the test
3927 above the code would die later anyway. If this assert
3928 happens, you just need to make the port support wide int. */
3929 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
3934 result
= wi::sub (pop0
, pop1
);
3938 result
= wi::add (pop0
, pop1
);
3942 result
= wi::mul (pop0
, pop1
);
3946 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3952 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3958 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3964 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3970 result
= wi::bit_and (pop0
, pop1
);
3974 result
= wi::bit_or (pop0
, pop1
);
3978 result
= wi::bit_xor (pop0
, pop1
);
3982 result
= wi::smin (pop0
, pop1
);
3986 result
= wi::smax (pop0
, pop1
);
3990 result
= wi::umin (pop0
, pop1
);
3994 result
= wi::umax (pop0
, pop1
);
4001 wide_int wop1
= pop1
;
4002 if (SHIFT_COUNT_TRUNCATED
)
4003 wop1
= wi::umod_trunc (wop1
, width
);
4004 else if (wi::geu_p (wop1
, width
))
4010 result
= wi::lrshift (pop0
, wop1
);
4014 result
= wi::arshift (pop0
, wop1
);
4018 result
= wi::lshift (pop0
, wop1
);
4029 if (wi::neg_p (pop1
))
4035 result
= wi::lrotate (pop0
, pop1
);
4039 result
= wi::rrotate (pop0
, pop1
);
4050 return immed_wide_int_const (result
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return false;
}
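/* An illustrative, standalone sketch (not part of GCC) of the brute-force
   method described above, modelled on plain integers: a nested sum is
   flattened into an array of (value, neg) pairs, just as the loop below
   expands PLUS, MINUS and NEG into ops[], and the result is then rebuilt
   from the flattened form.  Compile separately.  */
#if 0
#include <assert.h>

struct op_data { int value; int neg; };

/* Rebuild the operation from the flattened operand array, the analogue
   of the gen_rtx_fmt_ee loop at the end of simplify_plus_minus.  */
static int
recombine (const struct op_data *ops, int n_ops)
{
  int result = 0;
  for (int i = 0; i < n_ops; i++)
    result += ops[i].neg ? -ops[i].value : ops[i].value;
  return result;
}

int
main (void)
{
  int a = 7, b = 3, c = 10;
  /* (a - b) + (c - (a + b)) flattens to a, -b, c, -a, -b.  */
  struct op_data ops[] =
    { { a, 0 }, { b, 1 }, { c, 0 }, { a, 1 }, { b, 1 } };

  assert (recombine (ops, 5) == (a - b) + (c - (a + b)));
  return 0;
}
#endif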
static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      canonicalized |= this_neg || i != n_ops - 2;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;

	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}
      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, i.e. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }
      /* If nothing changed, fail.  */
      if (!canonicalized)
	return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = !ops[0].neg;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands
   must not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
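  /* An illustrative, standalone check (not part of GCC) that the
     canonicalizations above are exact identities; compile it
     separately.  */
#if 0
#include <assert.h>

int
main (void)
{
  for (unsigned x = 0; x < 1000; x++)
    {
      assert ((x > 0u) == (x != 0u));	/* (GTU x 0)  -> (NE x 0) */
      assert ((x <= 0u) == (x == 0u));	/* (LEU x 0)  -> (EQ x 0) */
      assert ((x >= 1u) == (x != 0u));	/* (GEU x 1)  -> (NE x 0) */
      assert ((x < 1u) == (x == 0u));	/* (LTU x 1)  -> (EQ x 0) */
    }
  for (int x = -1000; x < 1000; x++)
    {
      assert ((x >= 1) == (x > 0));	/* (GE x 1)   -> (GT x 0) */
      assert ((x < 1) == (x <= 0));	/* (LT x 1)   -> (LE x 0) */
      assert ((x <= -1) == (x < 0));	/* (LE x -1)  -> (LT x 0) */
      assert ((x > -1) == (x >= 0));	/* (GT x -1)  -> (GE x 0) */
    }
  return 0;
}
#endif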
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
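  /* An illustrative, standalone check (not part of GCC) of the rewrite
     above: in a modular (wrapping) integer mode, x + c1 == c2 exactly
     when x == c2 - c1, which is why the fold needs no overflow caveats
     for integral modes.  Compile separately.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  const uint32_t c1 = 100, c2 = 42;	/* c2 - c1 wraps around zero */

  for (uint32_t x = 0; x < 100000; x++)
    assert (((uint32_t) (x + c1) == c2) == (x == (uint32_t) (c2 - c1)));
  return 0;
}
#endif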
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
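  /* An illustrative, standalone check (not part of GCC) of the XOR folds
     above; they all follow from XOR being its own inverse.  Compile
     separately.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t x = 0xdeadbeef, y = 0x12345678;
  uint32_t c1 = 0xff00ff00, c2 = 0x0f0f0f0f;

  assert (((x ^ y) == 0) == (x == y));	/* (eq (xor x y) 0) -> (eq x y) */
  assert (((x ^ y) == x) == (y == 0));	/* (eq (xor x y) x) -> (eq y 0) */
  assert (((x ^ c1) == c2)
	  == (x == (c1 ^ c2)));		/* (eq (xor x C1) C2)
					   -> (eq x (xor C1 C2)) */
  return 0;
}
#endif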
  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
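/* An illustrative, standalone check (not part of GCC) of the two folds
   above, using GCC's own builtins: bswap is an involution, and only zero
   has a popcount of zero.  Compile separately with GCC.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t x = 0x11223344, c = 0x44332211;

  /* (eq (bswap x) C1) -> (eq x C2) with C2 the byte-swapped C1.  */
  assert ((__builtin_bswap32 (x) == c) == (x == __builtin_bswap32 (c)));

  /* (eq (popcount x) 0) -> (eq x 0).  */
  for (uint32_t v = 0; v < 100000; v++)
    assert ((__builtin_popcount (v) == 0) == (v == 0));
  return 0;
}
#endif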
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
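/* An illustrative, standalone sketch (not part of GCC) of how callers
   build the KNOWN_RESULTS flag set that comparison_result decodes: for a
   concrete operand pair, either CMP_EQ or one signed flag OR'd with one
   unsigned flag.  The flag values below mirror the CMP_* enum above.
   Compile separately.  */
#if 0
#include <assert.h>

enum { CMP_EQ = 1, CMP_LT = 2, CMP_GT = 4, CMP_LTU = 8, CMP_GTU = 16 };

static int
known_flags (int a, int b)
{
  if (a == b)
    return CMP_EQ;
  return (a < b ? CMP_LT : CMP_GT)
	 | ((unsigned) a < (unsigned) b ? CMP_LTU : CMP_GTU);
}

int
main (void)
{
  int f = known_flags (-1, 2);

  assert (f & CMP_LT);		/* signed: -1 < 2, so LT folds to true */
  assert (f & CMP_GTU);		/* unsigned: 0xffffffff > 2, LTU is false */
  assert (!(f & CMP_EQ));	/* EQ folds to false, NE to true */
  return 0;
}
#endif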
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }
  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
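  /* An illustrative, standalone check (not part of GCC) of why the A - B
     rewrite above is restricted to EQ and NE: equality survives wrapping
     subtraction, signed order does not.  Compile separately.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t a = INT32_MIN, b = INT32_MAX;
  uint32_t diff = (uint32_t) a - (uint32_t) b;

  assert ((a == b) == (diff == 0));	/* EQ is safe */
  assert ((a != b) == (diff != 0));	/* NE is safe */

  /* But a < b here while the wrapped difference is +1, not negative:
     folding LT through the subtraction would be wrong.  */
  assert (a < b && (int32_t) diff > 0);
  return 0;
}
#endif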
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
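  /* An illustrative, standalone check (not part of GCC) of the bound
     rules above on a hypothetical 8-bit signed mode, where mmin = -128
     and mmax = 127.  Compile separately.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  const int mmin = INT8_MIN, mmax = INT8_MAX;

  for (int x = mmin; x <= mmax; x++)
    {
      int8_t v = (int8_t) x;

      assert (v >= mmin);	/* x >= y is always true for y <= mmin */
      assert (!(v > mmax));	/* x > y is always false for y >= mmax */
      assert (!(v < mmin));	/* x < y is always false for y <= mmin */
      assert (v <= mmax);	/* x <= y is always true for y >= mmax */
    }
  return 0;
}
#endif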
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;
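      /* An illustrative, standalone check (not part of GCC) that the FMA
	 negation simplification above is exact: negating both
	 multiplication operands only flips signs, so -a * -b + c computes
	 the same value as a * b + c.  Compile separately with -lm.  */
#if 0
#include <assert.h>
#include <math.h>

int
main (void)
{
  double a = 1.5, b = -2.25, c = 3.0;

  assert (fma (-a, -b, c) == fma (a, b, c));	/* -a * -b + c => a * b + c */
  return 0;
}
#endif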
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;
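      /* An illustrative, standalone model (not part of GCC) of the
	 bit-field fold above for the little-endian (!BITS_BIG_ENDIAN)
	 case: shift the constant down to the field, zero-extend, and
	 optionally propagate the sign bit.  Compile separately.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int64_t
extract_field (uint64_t val, int len, int pos, int sign_extract)
{
  val >>= pos;				/* val >>= op2val */
  if (len != 64)
    {
      val &= ((uint64_t) 1 << len) - 1;	/* first zero-extend */
      if (sign_extract
	  && (val & ((uint64_t) 1 << (len - 1))) != 0)
	val |= ~(((uint64_t) 1 << len) - 1);	/* propagate sign bit */
    }
  return (int64_t) val;
}

int
main (void)
{
  assert (extract_field (0xf0, 4, 4, 0) == 0xf);  /* zero_extract */
  assert (extract_field (0xf0, 4, 4, 1) == -1);	  /* sign_extract */
  return 0;
}
#endif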
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	     with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
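/* An illustrative, standalone model (not part of GCC) of the constant
   vec_merge fold above: bit I of the selector picks element I from the
   first operand, a clear bit picks it from the second.  Compile
   separately.  */
#if 0
#include <assert.h>

static void
vec_merge4 (int *dst, const int *op0, const int *op1, unsigned sel)
{
  for (int i = 0; i < 4; i++)
    dst[i] = (sel & (1u << i)) ? op0[i] : op1[i];
}

int
main (void)
{
  int a[4] = { 1, 2, 3, 4 }, b[4] = { 5, 6, 7, 8 }, r[4];

  vec_merge4 (r, a, b, 0x5);	/* 0b0101 */
  assert (r[0] == 1 && r[1] == 6 && r[2] == 3 && r[3] == 8);

  vec_merge4 (r, a, b, 0x0);	/* (sel & mask) == 0: the result is op1 */
  assert (r[0] == 5 && r[1] == 6 && r[2] == 7 && r[3] == 8);
  return 0;
}
#endif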
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = std::make_pair (el, innermode);
	    unsigned char extend = wi::sign_mask (val);

	    for (i = 0; i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
		/ HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
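/* An illustrative, standalone model (not part of GCC) of the unpack /
   repack scheme above for the simplest configuration: a little-endian
   host taking a 16-bit subreg of a 32-bit integer.  Compile
   separately.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint16_t
subreg_hi_of_si (uint32_t op, unsigned byte)
{
  unsigned char value[4];

  /* Unpack the value into a little-endian array of bytes.  */
  for (int i = 0; i < 4; i++)
    value[i] = (op >> (8 * i)) & 0xff;

  /* Pick the right byte to start with and re-pack for the outer mode.  */
  return (uint16_t) (value[byte] | ((uint16_t) value[byte + 1] << 8));
}

int
main (void)
{
  assert (subreg_hi_of_si (0x12345678, 0) == 0x5678);	/* low part */
  assert (subreg_hi_of_si (0x12345678, 2) == 0x1234);	/* high part */
  return 0;
}
#endif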
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))