/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "double-int.h"
#include "fold-const.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "statistics.h"
#include "fixed-value.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */

#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
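/* Illustrative note (not in the original source): HWI_SIGN_EXTEND yields
   the HOST_WIDE_INT that would complete the (low, high) pair if LOW were
   treated as signed, e.g.

     unsigned HOST_WIDE_INT low = ...;
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);

   so HIGH becomes -1 when the sign bit of LOW is set and 0 otherwise.  */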
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
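/* Illustrative sketch (not in the original source): a caller asking whether
   a constant is exactly the sign bit of SImode might write

     rtx c = gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode);
     if (mode_signbit_p (SImode, c))
       ...;

   The 32-bit width of SImode is an assumption made only for the example.  */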
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
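/* Illustrative sketch (not in the original source): building (plus:SI x 4)
   through this entry point lets constant operands fold immediately, e.g.

     rtx sum = simplify_gen_binary (PLUS, SImode, x, GEN_INT (4));

   If X is itself a CONST_INT, the call returns the folded constant rather
   than an explicit PLUS rtx.  */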
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
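/* Illustrative sketch (not in the original source): these generator entry
   points share the same fold-first pattern, e.g. producing
   (eq:SI (reg) (const_int 0)) while letting known comparisons fold away:

     rtx cond = simplify_gen_relational (EQ, SImode, DImode, reg, const0_rtx);

   SImode/DImode here are example modes only.  */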
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
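/* Illustrative sketch (not in the original source): propagating a known
   register value through an expression and folding the result, e.g.

     rtx folded = simplify_replace_rtx (cond, pseudo, const0_rtx);

   replaces every occurrence of PSEUDO in COND with (const_int 0) and
   simplifies whatever that exposes.  */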
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
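/* Illustrative sketch (not in the original source): per the comment above,
   a caller that only needs the rvalue form would typically write

     rtx low = simplify_gen_unary (TRUNCATE, SImode, x, DImode);

   and let simplify_unary_operation decide between a lowpart subreg and an
   explicit TRUNCATE.  SImode/DImode are example modes only.  */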
static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
#ifdef WORD_REGISTER_OPERATIONS
      && precision >= BITS_PER_WORD
#endif
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
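/* Illustrative sketch (not in the original source): folding a constant
   negation through this entry point, e.g.

     rtx r = simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode);

   yields (const_int -5); a non-constant operand instead falls through to
   simplify_unary_operation_1 below.  */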
850 /* Perform some simplifications we can do even if the operands
853 simplify_unary_operation_1 (enum rtx_code code
, machine_mode mode
, rtx op
)
855 enum rtx_code reversed
;
861 /* (not (not X)) == X. */
862 if (GET_CODE (op
) == NOT
)
865 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
866 comparison is all ones. */
867 if (COMPARISON_P (op
)
868 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
869 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
870 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
871 XEXP (op
, 0), XEXP (op
, 1));
873 /* (not (plus X -1)) can become (neg X). */
874 if (GET_CODE (op
) == PLUS
875 && XEXP (op
, 1) == constm1_rtx
)
876 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
878 /* Similarly, (not (neg X)) is (plus X -1). */
879 if (GET_CODE (op
) == NEG
)
880 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
883 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
884 if (GET_CODE (op
) == XOR
885 && CONST_INT_P (XEXP (op
, 1))
886 && (temp
= simplify_unary_operation (NOT
, mode
,
887 XEXP (op
, 1), mode
)) != 0)
888 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
890 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
891 if (GET_CODE (op
) == PLUS
892 && CONST_INT_P (XEXP (op
, 1))
893 && mode_signbit_p (mode
, XEXP (op
, 1))
894 && (temp
= simplify_unary_operation (NOT
, mode
,
895 XEXP (op
, 1), mode
)) != 0)
896 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
899 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
900 operands other than 1, but that is not valid. We could do a
901 similar simplification for (not (lshiftrt C X)) where C is
902 just the sign bit, but this doesn't seem common enough to
904 if (GET_CODE (op
) == ASHIFT
905 && XEXP (op
, 0) == const1_rtx
)
907 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
908 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
911 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
912 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
913 so we can perform the above simplification. */
914 if (STORE_FLAG_VALUE
== -1
915 && GET_CODE (op
) == ASHIFTRT
916 && CONST_INT_P (XEXP (op
, 1))
917 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
918 return simplify_gen_relational (GE
, mode
, VOIDmode
,
919 XEXP (op
, 0), const0_rtx
);
922 if (GET_CODE (op
) == SUBREG
923 && subreg_lowpart_p (op
)
924 && (GET_MODE_SIZE (GET_MODE (op
))
925 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
926 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
927 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
929 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
932 x
= gen_rtx_ROTATE (inner_mode
,
933 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
935 XEXP (SUBREG_REG (op
), 1));
936 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
941 /* Apply De Morgan's laws to reduce number of patterns for machines
942 with negating logical insns (and-not, nand, etc.). If result has
943 only one NOT, put it first, since that is how the patterns are
945 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
947 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
948 machine_mode op_mode
;
950 op_mode
= GET_MODE (in1
);
951 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
953 op_mode
= GET_MODE (in2
);
954 if (op_mode
== VOIDmode
)
956 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
958 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
961 in2
= in1
; in1
= tem
;
964 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
968 /* (not (bswap x)) -> (bswap (not x)). */
969 if (GET_CODE (op
) == BSWAP
)
971 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
972 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
977 /* (neg (neg X)) == X. */
978 if (GET_CODE (op
) == NEG
)
981 /* (neg (plus X 1)) can become (not X). */
982 if (GET_CODE (op
) == PLUS
983 && XEXP (op
, 1) == const1_rtx
)
984 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
986 /* Similarly, (neg (not X)) is (plus X 1). */
987 if (GET_CODE (op
) == NOT
)
988 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
991 /* (neg (minus X Y)) can become (minus Y X). This transformation
992 isn't safe for modes with signed zeros, since if X and Y are
993 both +0, (minus Y X) is the same as (minus X Y). If the
994 rounding mode is towards +infinity (or -infinity) then the two
995 expressions will be rounded differently. */
996 if (GET_CODE (op
) == MINUS
997 && !HONOR_SIGNED_ZEROS (mode
)
998 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
999 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
1001 if (GET_CODE (op
) == PLUS
1002 && !HONOR_SIGNED_ZEROS (mode
)
1003 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1005 /* (neg (plus A C)) is simplified to (minus -C A). */
1006 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
1007 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
1009 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
1011 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
1014 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1015 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1016 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1019 /* (neg (mult A B)) becomes (mult A (neg B)).
1020 This works even for floating-point values. */
1021 if (GET_CODE (op
) == MULT
1022 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1024 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
1025 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
1028 /* NEG commutes with ASHIFT since it is multiplication. Only do
1029 this if we can then eliminate the NEG (e.g., if the operand
1031 if (GET_CODE (op
) == ASHIFT
)
1033 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
1035 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
1038 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1039 C is equal to the width of MODE minus 1. */
1040 if (GET_CODE (op
) == ASHIFTRT
1041 && CONST_INT_P (XEXP (op
, 1))
1042 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
1043 return simplify_gen_binary (LSHIFTRT
, mode
,
1044 XEXP (op
, 0), XEXP (op
, 1));
1046 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1047 C is equal to the width of MODE minus 1. */
1048 if (GET_CODE (op
) == LSHIFTRT
1049 && CONST_INT_P (XEXP (op
, 1))
1050 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
1051 return simplify_gen_binary (ASHIFTRT
, mode
,
1052 XEXP (op
, 0), XEXP (op
, 1));
1054 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1055 if (GET_CODE (op
) == XOR
1056 && XEXP (op
, 1) == const1_rtx
1057 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1058 return plus_constant (mode
, XEXP (op
, 0), -1);
1060 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1061 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1062 if (GET_CODE (op
) == LT
1063 && XEXP (op
, 1) == const0_rtx
1064 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
1066 machine_mode inner
= GET_MODE (XEXP (op
, 0));
1067 int isize
= GET_MODE_PRECISION (inner
);
1068 if (STORE_FLAG_VALUE
== 1)
1070 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1071 GEN_INT (isize
- 1));
1074 if (GET_MODE_PRECISION (mode
) > isize
)
1075 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
1076 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1078 else if (STORE_FLAG_VALUE
== -1)
1080 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1081 GEN_INT (isize
- 1));
1084 if (GET_MODE_PRECISION (mode
) > isize
)
1085 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
1086 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1092 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1093 with the umulXi3_highpart patterns. */
1094 if (GET_CODE (op
) == LSHIFTRT
1095 && GET_CODE (XEXP (op
, 0)) == MULT
)
1098 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1100 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1102 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1106 /* We can't handle truncation to a partial integer mode here
1107 because we don't know the real bitsize of the partial
1112 if (GET_MODE (op
) != VOIDmode
)
1114 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1119 /* If we know that the value is already truncated, we can
1120 replace the TRUNCATE with a SUBREG. */
1121 if (GET_MODE_NUNITS (mode
) == 1
1122 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1123 || truncated_to_mode (mode
, op
)))
1125 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1130 /* A truncate of a comparison can be replaced with a subreg if
1131 STORE_FLAG_VALUE permits. This is like the previous test,
1132 but it works even if the comparison is done in a mode larger
1133 than HOST_BITS_PER_WIDE_INT. */
1134 if (HWI_COMPUTABLE_MODE_P (mode
)
1135 && COMPARISON_P (op
)
1136 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1138 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1143 /* A truncate of a memory is just loading the low part of the memory
1144 if we are not changing the meaning of the address. */
1145 if (GET_CODE (op
) == MEM
1146 && !VECTOR_MODE_P (mode
)
1147 && !MEM_VOLATILE_P (op
)
1148 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1150 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1157 case FLOAT_TRUNCATE
:
1158 if (DECIMAL_FLOAT_MODE_P (mode
))
1161 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1162 if (GET_CODE (op
) == FLOAT_EXTEND
1163 && GET_MODE (XEXP (op
, 0)) == mode
)
1164 return XEXP (op
, 0);
1166 /* (float_truncate:SF (float_truncate:DF foo:XF))
1167 = (float_truncate:SF foo:XF).
1168 This may eliminate double rounding, so it is unsafe.
1170 (float_truncate:SF (float_extend:XF foo:DF))
1171 = (float_truncate:SF foo:DF).
1173 (float_truncate:DF (float_extend:XF foo:SF))
1174 = (float_extend:SF foo:DF). */
1175 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1176 && flag_unsafe_math_optimizations
)
1177 || GET_CODE (op
) == FLOAT_EXTEND
)
1178 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
1180 > GET_MODE_SIZE (mode
)
1181 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1183 XEXP (op
, 0), mode
);
1185 /* (float_truncate (float x)) is (float x) */
1186 if (GET_CODE (op
) == FLOAT
1187 && (flag_unsafe_math_optimizations
1188 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1189 && ((unsigned)significand_size (GET_MODE (op
))
1190 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1191 - num_sign_bit_copies (XEXP (op
, 0),
1192 GET_MODE (XEXP (op
, 0))))))))
1193 return simplify_gen_unary (FLOAT
, mode
,
1195 GET_MODE (XEXP (op
, 0)));
1197 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1198 (OP:SF foo:SF) if OP is NEG or ABS. */
1199 if ((GET_CODE (op
) == ABS
1200 || GET_CODE (op
) == NEG
)
1201 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1202 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1203 return simplify_gen_unary (GET_CODE (op
), mode
,
1204 XEXP (XEXP (op
, 0), 0), mode
);
1206 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1207 is (float_truncate:SF x). */
1208 if (GET_CODE (op
) == SUBREG
1209 && subreg_lowpart_p (op
)
1210 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1211 return SUBREG_REG (op
);
1215 if (DECIMAL_FLOAT_MODE_P (mode
))
1218 /* (float_extend (float_extend x)) is (float_extend x)
1220 (float_extend (float x)) is (float x) assuming that double
1221 rounding can't happen.
1223 if (GET_CODE (op
) == FLOAT_EXTEND
1224 || (GET_CODE (op
) == FLOAT
1225 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1226 && ((unsigned)significand_size (GET_MODE (op
))
1227 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1228 - num_sign_bit_copies (XEXP (op
, 0),
1229 GET_MODE (XEXP (op
, 0)))))))
1230 return simplify_gen_unary (GET_CODE (op
), mode
,
1232 GET_MODE (XEXP (op
, 0)));
1237 /* (abs (neg <foo>)) -> (abs <foo>) */
1238 if (GET_CODE (op
) == NEG
)
1239 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1240 GET_MODE (XEXP (op
, 0)));
1242 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1244 if (GET_MODE (op
) == VOIDmode
)
1247 /* If operand is something known to be positive, ignore the ABS. */
1248 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1249 || val_signbit_known_clear_p (GET_MODE (op
),
1250 nonzero_bits (op
, GET_MODE (op
))))
1253 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1254 if (num_sign_bit_copies (op
, mode
) == GET_MODE_PRECISION (mode
))
1255 return gen_rtx_NEG (mode
, op
);
1260 /* (ffs (*_extend <X>)) = (ffs <X>) */
1261 if (GET_CODE (op
) == SIGN_EXTEND
1262 || GET_CODE (op
) == ZERO_EXTEND
)
1263 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1264 GET_MODE (XEXP (op
, 0)));
1268 switch (GET_CODE (op
))
1272 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1273 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1274 GET_MODE (XEXP (op
, 0)));
1278 /* Rotations don't affect popcount. */
1279 if (!side_effects_p (XEXP (op
, 1)))
1280 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1281 GET_MODE (XEXP (op
, 0)));
1290 switch (GET_CODE (op
))
1296 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1297 GET_MODE (XEXP (op
, 0)));
1301 /* Rotations don't affect parity. */
1302 if (!side_effects_p (XEXP (op
, 1)))
1303 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1304 GET_MODE (XEXP (op
, 0)));
1313 /* (bswap (bswap x)) -> x. */
1314 if (GET_CODE (op
) == BSWAP
)
1315 return XEXP (op
, 0);
1319 /* (float (sign_extend <X>)) = (float <X>). */
1320 if (GET_CODE (op
) == SIGN_EXTEND
)
1321 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1322 GET_MODE (XEXP (op
, 0)));
1326 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1327 becomes just the MINUS if its mode is MODE. This allows
1328 folding switch statements on machines using casesi (such as
1330 if (GET_CODE (op
) == TRUNCATE
1331 && GET_MODE (XEXP (op
, 0)) == mode
1332 && GET_CODE (XEXP (op
, 0)) == MINUS
1333 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1334 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1335 return XEXP (op
, 0);
1337 /* Extending a widening multiplication should be canonicalized to
1338 a wider widening multiplication. */
1339 if (GET_CODE (op
) == MULT
)
1341 rtx lhs
= XEXP (op
, 0);
1342 rtx rhs
= XEXP (op
, 1);
1343 enum rtx_code lcode
= GET_CODE (lhs
);
1344 enum rtx_code rcode
= GET_CODE (rhs
);
1346 /* Widening multiplies usually extend both operands, but sometimes
1347 they use a shift to extract a portion of a register. */
1348 if ((lcode
== SIGN_EXTEND
1349 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1350 && (rcode
== SIGN_EXTEND
1351 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1353 machine_mode lmode
= GET_MODE (lhs
);
1354 machine_mode rmode
= GET_MODE (rhs
);
1357 if (lcode
== ASHIFTRT
)
1358 /* Number of bits not shifted off the end. */
1359 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1360 else /* lcode == SIGN_EXTEND */
1361 /* Size of inner mode. */
1362 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1364 if (rcode
== ASHIFTRT
)
1365 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1366 else /* rcode == SIGN_EXTEND */
1367 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1369 /* We can only widen multiplies if the result is mathematiclly
1370 equivalent. I.e. if overflow was impossible. */
1371 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1372 return simplify_gen_binary
1374 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1375 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1379 /* Check for a sign extension of a subreg of a promoted
1380 variable, where the promotion is sign-extended, and the
1381 target mode is the same as the variable's promotion. */
1382 if (GET_CODE (op
) == SUBREG
1383 && SUBREG_PROMOTED_VAR_P (op
)
1384 && SUBREG_PROMOTED_SIGNED_P (op
)
1385 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1387 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1392 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1393 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1394 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1396 gcc_assert (GET_MODE_PRECISION (mode
)
1397 > GET_MODE_PRECISION (GET_MODE (op
)));
1398 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1399 GET_MODE (XEXP (op
, 0)));
1402 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1403 is (sign_extend:M (subreg:O <X>)) if there is mode with
1404 GET_MODE_BITSIZE (N) - I bits.
1405 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1406 is similarly (zero_extend:M (subreg:O <X>)). */
1407 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1408 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1409 && CONST_INT_P (XEXP (op
, 1))
1410 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1411 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1414 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1415 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1416 gcc_assert (GET_MODE_BITSIZE (mode
)
1417 > GET_MODE_BITSIZE (GET_MODE (op
)));
1418 if (tmode
!= BLKmode
)
1421 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1423 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1424 ? SIGN_EXTEND
: ZERO_EXTEND
,
1425 mode
, inner
, tmode
);
1429 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1430 /* As we do not know which address space the pointer is referring to,
1431 we can do this only if the target does not support different pointer
1432 or address modes depending on the address space. */
1433 if (target_default_pointer_address_modes_p ()
1434 && ! POINTERS_EXTEND_UNSIGNED
1435 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1437 || (GET_CODE (op
) == SUBREG
1438 && REG_P (SUBREG_REG (op
))
1439 && REG_POINTER (SUBREG_REG (op
))
1440 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1441 return convert_memory_address (Pmode
, op
);
1446 /* Check for a zero extension of a subreg of a promoted
1447 variable, where the promotion is zero-extended, and the
1448 target mode is the same as the variable's promotion. */
1449 if (GET_CODE (op
) == SUBREG
1450 && SUBREG_PROMOTED_VAR_P (op
)
1451 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1452 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1454 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1459 /* Extending a widening multiplication should be canonicalized to
1460 a wider widening multiplication. */
1461 if (GET_CODE (op
) == MULT
)
1463 rtx lhs
= XEXP (op
, 0);
1464 rtx rhs
= XEXP (op
, 1);
1465 enum rtx_code lcode
= GET_CODE (lhs
);
1466 enum rtx_code rcode
= GET_CODE (rhs
);
1468 /* Widening multiplies usually extend both operands, but sometimes
1469 they use a shift to extract a portion of a register. */
1470 if ((lcode
== ZERO_EXTEND
1471 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1472 && (rcode
== ZERO_EXTEND
1473 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1475 machine_mode lmode
= GET_MODE (lhs
);
1476 machine_mode rmode
= GET_MODE (rhs
);
1479 if (lcode
== LSHIFTRT
)
1480 /* Number of bits not shifted off the end. */
1481 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1482 else /* lcode == ZERO_EXTEND */
1483 /* Size of inner mode. */
1484 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1486 if (rcode
== LSHIFTRT
)
1487 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1488 else /* rcode == ZERO_EXTEND */
1489 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1491 /* We can only widen multiplies if the result is mathematiclly
1492 equivalent. I.e. if overflow was impossible. */
1493 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1494 return simplify_gen_binary
1496 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1497 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1501 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1502 if (GET_CODE (op
) == ZERO_EXTEND
)
1503 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1504 GET_MODE (XEXP (op
, 0)));
1506 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1507 is (zero_extend:M (subreg:O <X>)) if there is mode with
1508 GET_MODE_PRECISION (N) - I bits. */
1509 if (GET_CODE (op
) == LSHIFTRT
1510 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1511 && CONST_INT_P (XEXP (op
, 1))
1512 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1513 && GET_MODE_PRECISION (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1516 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op
))
1517 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1518 if (tmode
!= BLKmode
)
1521 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1523 return simplify_gen_unary (ZERO_EXTEND
, mode
, inner
, tmode
);
1527 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1528 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1530 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1531 (and:SI (reg:SI) (const_int 63)). */
1532 if (GET_CODE (op
) == SUBREG
1533 && GET_MODE_PRECISION (GET_MODE (op
))
1534 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1535 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1536 <= HOST_BITS_PER_WIDE_INT
1537 && GET_MODE_PRECISION (mode
)
1538 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1539 && subreg_lowpart_p (op
)
1540 && (nonzero_bits (SUBREG_REG (op
), GET_MODE (SUBREG_REG (op
)))
1541 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1543 if (GET_MODE_PRECISION (mode
)
1544 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
))))
1545 return SUBREG_REG (op
);
1546 return simplify_gen_unary (ZERO_EXTEND
, mode
, SUBREG_REG (op
),
1547 GET_MODE (SUBREG_REG (op
)));
1550 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1551 /* As we do not know which address space the pointer is referring to,
1552 we can do this only if the target does not support different pointer
1553 or address modes depending on the address space. */
1554 if (target_default_pointer_address_modes_p ()
1555 && POINTERS_EXTEND_UNSIGNED
> 0
1556 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1558 || (GET_CODE (op
) == SUBREG
1559 && REG_P (SUBREG_REG (op
))
1560 && REG_POINTER (SUBREG_REG (op
))
1561 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1562 return convert_memory_address (Pmode
, op
);
1573 /* Try to compute the value of a unary operation CODE whose output mode is to
1574 be MODE with input operand OP whose mode was originally OP_MODE.
1575 Return zero if the value cannot be computed. */
1577 simplify_const_unary_operation (enum rtx_code code
, machine_mode mode
,
1578 rtx op
, machine_mode op_mode
)
1580 unsigned int width
= GET_MODE_PRECISION (mode
);
1582 if (code
== VEC_DUPLICATE
)
1584 gcc_assert (VECTOR_MODE_P (mode
));
1585 if (GET_MODE (op
) != VOIDmode
)
1587 if (!VECTOR_MODE_P (GET_MODE (op
)))
1588 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1590 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1593 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
)
1594 || GET_CODE (op
) == CONST_VECTOR
)
1596 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1597 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1598 rtvec v
= rtvec_alloc (n_elts
);
1601 if (GET_CODE (op
) != CONST_VECTOR
)
1602 for (i
= 0; i
< n_elts
; i
++)
1603 RTVEC_ELT (v
, i
) = op
;
1606 machine_mode inmode
= GET_MODE (op
);
1607 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
1608 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1610 gcc_assert (in_n_elts
< n_elts
);
1611 gcc_assert ((n_elts
% in_n_elts
) == 0);
1612 for (i
= 0; i
< n_elts
; i
++)
1613 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1615 return gen_rtx_CONST_VECTOR (mode
, v
);
1619 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1621 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1622 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1623 machine_mode opmode
= GET_MODE (op
);
1624 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
1625 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1626 rtvec v
= rtvec_alloc (n_elts
);
1629 gcc_assert (op_n_elts
== n_elts
);
1630 for (i
= 0; i
< n_elts
; i
++)
1632 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1633 CONST_VECTOR_ELT (op
, i
),
1634 GET_MODE_INNER (opmode
));
1637 RTVEC_ELT (v
, i
) = x
;
1639 return gen_rtx_CONST_VECTOR (mode
, v
);
1642 /* The order of these tests is critical so that, for example, we don't
1643 check the wrong mode (input vs. output) for a conversion operation,
1644 such as FIX. At some point, this should be simplified. */
1646 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1650 if (op_mode
== VOIDmode
)
1652 /* CONST_INT have VOIDmode as the mode. We assume that all
1653 the bits of the constant are significant, though, this is
1654 a dangerous assumption as many times CONST_INTs are
1655 created and used with garbage in the bits outside of the
1656 precision of the implied mode of the const_int. */
1657 op_mode
= MAX_MODE_INT
;
1660 real_from_integer (&d
, mode
, std::make_pair (op
, op_mode
), SIGNED
);
1661 d
= real_value_truncate (mode
, d
);
1662 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1664 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1668 if (op_mode
== VOIDmode
)
1670 /* CONST_INT have VOIDmode as the mode. We assume that all
1671 the bits of the constant are significant, though, this is
1672 a dangerous assumption as many times CONST_INTs are
1673 created and used with garbage in the bits outside of the
1674 precision of the implied mode of the const_int. */
1675 op_mode
= MAX_MODE_INT
;
1678 real_from_integer (&d
, mode
, std::make_pair (op
, op_mode
), UNSIGNED
);
1679 d
= real_value_truncate (mode
, d
);
1680 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1683 if (CONST_SCALAR_INT_P (op
) && width
> 0)
1686 machine_mode imode
= op_mode
== VOIDmode
? mode
: op_mode
;
1687 rtx_mode_t op0
= std::make_pair (op
, imode
);
1690 #if TARGET_SUPPORTS_WIDE_INT == 0
1691 /* This assert keeps the simplification from producing a result
1692 that cannot be represented in a CONST_DOUBLE but a lot of
1693 upstream callers expect that this function never fails to
1694 simplify something and so you if you added this to the test
1695 above the code would die later anyway. If this assert
1696 happens, you just need to make the port support wide int. */
1697 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
1703 result
= wi::bit_not (op0
);
1707 result
= wi::neg (op0
);
1711 result
= wi::abs (op0
);
1715 result
= wi::shwi (wi::ffs (op0
), mode
);
1719 if (wi::ne_p (op0
, 0))
1720 int_value
= wi::clz (op0
);
1721 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, int_value
))
1722 int_value
= GET_MODE_PRECISION (mode
);
1723 result
= wi::shwi (int_value
, mode
);
1727 result
= wi::shwi (wi::clrsb (op0
), mode
);
1731 if (wi::ne_p (op0
, 0))
1732 int_value
= wi::ctz (op0
);
1733 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, int_value
))
1734 int_value
= GET_MODE_PRECISION (mode
);
1735 result
= wi::shwi (int_value
, mode
);
1739 result
= wi::shwi (wi::popcount (op0
), mode
);
1743 result
= wi::shwi (wi::parity (op0
), mode
);
1747 result
= wide_int (op0
).bswap ();
1752 result
= wide_int::from (op0
, width
, UNSIGNED
);
1756 result
= wide_int::from (op0
, width
, SIGNED
);
1764 return immed_wide_int_const (result
, mode
);
1767 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1768 && SCALAR_FLOAT_MODE_P (mode
)
1769 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1772 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1779 d
= real_value_abs (&d
);
1782 d
= real_value_negate (&d
);
1784 case FLOAT_TRUNCATE
:
1785 d
= real_value_truncate (mode
, d
);
1788 /* All this does is change the mode, unless changing
1790 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1791 real_convert (&d
, mode
, &d
);
1794 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1801 real_to_target (tmp
, &d
, GET_MODE (op
));
1802 for (i
= 0; i
< 4; i
++)
1804 real_from_target (&d
, tmp
, mode
);
1810 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1812 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1813 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1814 && GET_MODE_CLASS (mode
) == MODE_INT
1817 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1818 operators are intentionally left unspecified (to ease implementation
1819 by target backends), for consistency, this routine implements the
1820 same semantics for constant folding as used by the middle-end. */
1822 /* This was formerly used only for non-IEEE float.
1823 eggert@twinsun.com says it is safe for IEEE also. */
1824 REAL_VALUE_TYPE x
, t
;
1825 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1826 wide_int wmax
, wmin
;
1827 /* This is part of the abi to real_to_integer, but we check
1828 things before making this call. */
1834 if (REAL_VALUE_ISNAN (x
))
1837 /* Test against the signed upper bound. */
1838 wmax
= wi::max_value (width
, SIGNED
);
1839 real_from_integer (&t
, VOIDmode
, wmax
, SIGNED
);
1840 if (REAL_VALUES_LESS (t
, x
))
1841 return immed_wide_int_const (wmax
, mode
);
1843 /* Test against the signed lower bound. */
1844 wmin
= wi::min_value (width
, SIGNED
);
1845 real_from_integer (&t
, VOIDmode
, wmin
, SIGNED
);
1846 if (REAL_VALUES_LESS (x
, t
))
1847 return immed_wide_int_const (wmin
, mode
);
1849 return immed_wide_int_const (real_to_integer (&x
, &fail
, width
), mode
);
1853 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1856 /* Test against the unsigned upper bound. */
1857 wmax
= wi::max_value (width
, UNSIGNED
);
1858 real_from_integer (&t
, VOIDmode
, wmax
, UNSIGNED
);
1859 if (REAL_VALUES_LESS (t
, x
))
1860 return immed_wide_int_const (wmax
, mode
);
1862 return immed_wide_int_const (real_to_integer (&x
, &fail
, width
),
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
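/* Illustrative note (not in the original source): the identity used above is
   that AND/IOR/XOR operate bytewise, so for example

     (and (bswap x) (const_int 0xff))  ==  (bswap (and x (bswap 0xff)))

   which moves the bswap outward where later folds may cancel it.  */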
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
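/* Illustrative note (not in the original source): on input such as
   (plus (plus x (const_int 4)) (const_int 8)) the reassociation above
   effectively tries "a op (b op c)" first, folding the two constants so
   the result simplifies to (plus x (const_int 12)).  */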
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
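/* Illustrative sketch (not in the original source): a typical caller asks

     rtx folded = simplify_binary_operation (AND, SImode, reg, const0_rtx);

   and gets (const_int 0) back; a null return means the expression must be
   kept as an explicit binary rtx.  */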

/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = std::make_pair (XEXP (rhs, 1), mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}
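
      /* Example of the distribution above: for (plus (mult x 3) x) the
	 coefficients are 3 and 1, so the candidate replacement is
	 (mult x 4); for (plus (ashift x 2) (neg x)) they are 4 and -1,
	 giving (mult x 3).  Either result is kept only if it costs no
	 more than the original expression.  */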

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
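
      /* E.g. with x = 0b1100 and y = 0b1010: x & y = 0b1000 and
	 x - (x & y) = 0b0100, which is exactly x & ~y.  The identity holds
	 because the bits of (x & y) are a subset of the bits of x, so the
	 subtraction never borrows.  */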

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (std::make_pair (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
	}
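
      /* For instance, (mult x (const_int 8)) becomes
	 (ashift x (const_int 3)), since wi::exact_log2 (8) is 3; a constant
	 that is not a power of two yields -1 and is left alone.  */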

      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case IOR:
2491 if (trueop1
== CONST0_RTX (mode
))
2493 if (INTEGRAL_MODE_P (mode
)
2494 && trueop1
== CONSTM1_RTX (mode
)
2495 && !side_effects_p (op0
))
2497 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2499 /* A | (~A) -> -1 */
2500 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2501 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2502 && ! side_effects_p (op0
)
2503 && SCALAR_INT_MODE_P (mode
))
2506 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2507 if (CONST_INT_P (op1
)
2508 && HWI_COMPUTABLE_MODE_P (mode
)
2509 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2510 && !side_effects_p (op0
))
2513 /* Canonicalize (X & C1) | C2. */
2514 if (GET_CODE (op0
) == AND
2515 && CONST_INT_P (trueop1
)
2516 && CONST_INT_P (XEXP (op0
, 1)))
2518 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2519 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2520 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2522 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2524 && !side_effects_p (XEXP (op0
, 0)))
2527 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2528 if (((c1
|c2
) & mask
) == mask
)
2529 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2531 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2532 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2534 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2535 gen_int_mode (c1
& ~c2
, mode
));
2536 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2540 /* Convert (A & B) | A to A. */
2541 if (GET_CODE (op0
) == AND
2542 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2543 || rtx_equal_p (XEXP (op0
, 1), op1
))
2544 && ! side_effects_p (XEXP (op0
, 0))
2545 && ! side_effects_p (XEXP (op0
, 1)))
2548 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2549 mode size to (rotate A CX). */
2551 if (GET_CODE (op1
) == ASHIFT
2552 || GET_CODE (op1
) == SUBREG
)
2563 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2564 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2565 && CONST_INT_P (XEXP (opleft
, 1))
2566 && CONST_INT_P (XEXP (opright
, 1))
2567 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2568 == GET_MODE_PRECISION (mode
)))
2569 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2571 /* Same, but for ashift that has been "simplified" to a wider mode
2572 by simplify_shift_const. */
2574 if (GET_CODE (opleft
) == SUBREG
2575 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2576 && GET_CODE (opright
) == LSHIFTRT
2577 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2578 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2579 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2580 && (GET_MODE_SIZE (GET_MODE (opleft
))
2581 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2582 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2583 SUBREG_REG (XEXP (opright
, 0)))
2584 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2585 && CONST_INT_P (XEXP (opright
, 1))
2586 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2587 == GET_MODE_PRECISION (mode
)))
2588 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2589 XEXP (SUBREG_REG (opleft
), 1));
2591 /* If we have (ior (and (X C1) C2)), simplify this by making
2592 C1 as small as possible if C1 actually changes. */
2593 if (CONST_INT_P (op1
)
2594 && (HWI_COMPUTABLE_MODE_P (mode
)
2595 || INTVAL (op1
) > 0)
2596 && GET_CODE (op0
) == AND
2597 && CONST_INT_P (XEXP (op0
, 1))
2598 && CONST_INT_P (op1
)
2599 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2601 rtx tmp
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2602 gen_int_mode (UINTVAL (XEXP (op0
, 1))
2605 return simplify_gen_binary (IOR
, mode
, tmp
, op1
);
2608 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2609 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2610 the PLUS does not affect any of the bits in OP1: then we can do
2611 the IOR as a PLUS and we can associate. This is valid if OP1
2612 can be safely shifted left C bits. */
2613 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2614 && GET_CODE (XEXP (op0
, 0)) == PLUS
2615 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2616 && CONST_INT_P (XEXP (op0
, 1))
2617 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2619 int count
= INTVAL (XEXP (op0
, 1));
2620 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2622 if (mask
>> count
== INTVAL (trueop1
)
2623 && trunc_int_for_mode (mask
, mode
) == mask
2624 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2625 return simplify_gen_binary (ASHIFTRT
, mode
,
2626 plus_constant (mode
, XEXP (op0
, 0),
2631 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2635 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2641 if (trueop1
== CONST0_RTX (mode
))
2643 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2644 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2645 if (rtx_equal_p (trueop0
, trueop1
)
2646 && ! side_effects_p (op0
)
2647 && GET_MODE_CLASS (mode
) != MODE_CC
)
2648 return CONST0_RTX (mode
);
2650 /* Canonicalize XOR of the most significant bit to PLUS. */
2651 if (CONST_SCALAR_INT_P (op1
)
2652 && mode_signbit_p (mode
, op1
))
2653 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2654 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2655 if (CONST_SCALAR_INT_P (op1
)
2656 && GET_CODE (op0
) == PLUS
2657 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2658 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2659 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2660 simplify_gen_binary (XOR
, mode
, op1
,
2663 /* If we are XORing two things that have no bits in common,
2664 convert them into an IOR. This helps to detect rotation encoded
2665 using those methods and possibly other simplifications. */
2667 if (HWI_COMPUTABLE_MODE_P (mode
)
2668 && (nonzero_bits (op0
, mode
)
2669 & nonzero_bits (op1
, mode
)) == 0)
2670 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2672 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2673 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2676 int num_negated
= 0;
2678 if (GET_CODE (op0
) == NOT
)
2679 num_negated
++, op0
= XEXP (op0
, 0);
2680 if (GET_CODE (op1
) == NOT
)
2681 num_negated
++, op1
= XEXP (op1
, 0);
2683 if (num_negated
== 2)
2684 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2685 else if (num_negated
== 1)
2686 return simplify_gen_unary (NOT
, mode
,
2687 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2691 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2692 correspond to a machine insn or result in further simplifications
2693 if B is a constant. */
2695 if (GET_CODE (op0
) == AND
2696 && rtx_equal_p (XEXP (op0
, 1), op1
)
2697 && ! side_effects_p (op1
))
2698 return simplify_gen_binary (AND
, mode
,
2699 simplify_gen_unary (NOT
, mode
,
2700 XEXP (op0
, 0), mode
),
2703 else if (GET_CODE (op0
) == AND
2704 && rtx_equal_p (XEXP (op0
, 0), op1
)
2705 && ! side_effects_p (op1
))
2706 return simplify_gen_binary (AND
, mode
,
2707 simplify_gen_unary (NOT
, mode
,
2708 XEXP (op0
, 1), mode
),
2711 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2712 we can transform like this:
2713 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2714 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2715 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2716 Attempt a few simplifications when B and C are both constants. */
2717 if (GET_CODE (op0
) == AND
2718 && CONST_INT_P (op1
)
2719 && CONST_INT_P (XEXP (op0
, 1)))
2721 rtx a
= XEXP (op0
, 0);
2722 rtx b
= XEXP (op0
, 1);
2724 HOST_WIDE_INT bval
= INTVAL (b
);
2725 HOST_WIDE_INT cval
= INTVAL (c
);
2728 = simplify_binary_operation (AND
, mode
,
2729 simplify_gen_unary (NOT
, mode
, a
, mode
),
2731 if ((~cval
& bval
) == 0)
2733 /* Try to simplify ~A&C | ~B&C. */
2734 if (na_c
!= NULL_RTX
)
2735 return simplify_gen_binary (IOR
, mode
, na_c
,
2736 gen_int_mode (~bval
& cval
, mode
));
2740 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2741 if (na_c
== const0_rtx
)
2743 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2744 gen_int_mode (~cval
& bval
,
2746 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2747 gen_int_mode (~bval
& cval
,
2753 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2754 comparison if STORE_FLAG_VALUE is 1. */
2755 if (STORE_FLAG_VALUE
== 1
2756 && trueop1
== const1_rtx
2757 && COMPARISON_P (op0
)
2758 && (reversed
= reversed_comparison (op0
, mode
)))
2761 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2762 is (lt foo (const_int 0)), so we can perform the above
2763 simplification if STORE_FLAG_VALUE is 1. */
2765 if (STORE_FLAG_VALUE
== 1
2766 && trueop1
== const1_rtx
2767 && GET_CODE (op0
) == LSHIFTRT
2768 && CONST_INT_P (XEXP (op0
, 1))
2769 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2770 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2772 /* (xor (comparison foo bar) (const_int sign-bit))
2773 when STORE_FLAG_VALUE is the sign bit. */
2774 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2775 && trueop1
== const_true_rtx
2776 && COMPARISON_P (op0
)
2777 && (reversed
= reversed_comparison (op0
, mode
)))
2780 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2784 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2790 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2792 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2794 if (HWI_COMPUTABLE_MODE_P (mode
))
2796 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2797 HOST_WIDE_INT nzop1
;
2798 if (CONST_INT_P (trueop1
))
2800 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2801 /* If we are turning off bits already known off in OP0, we need
2803 if ((nzop0
& ~val1
) == 0)
2806 nzop1
= nonzero_bits (trueop1
, mode
);
2807 /* If we are clearing all the nonzero bits, the result is zero. */
2808 if ((nzop1
& nzop0
) == 0
2809 && !side_effects_p (op0
) && !side_effects_p (op1
))
2810 return CONST0_RTX (mode
);
2812 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2813 && GET_MODE_CLASS (mode
) != MODE_CC
)
2816 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2817 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2818 && ! side_effects_p (op0
)
2819 && GET_MODE_CLASS (mode
) != MODE_CC
)
2820 return CONST0_RTX (mode
);
2822 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2823 there are no nonzero bits of C outside of X's mode. */
2824 if ((GET_CODE (op0
) == SIGN_EXTEND
2825 || GET_CODE (op0
) == ZERO_EXTEND
)
2826 && CONST_INT_P (trueop1
)
2827 && HWI_COMPUTABLE_MODE_P (mode
)
2828 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2829 & UINTVAL (trueop1
)) == 0)
2831 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2832 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2833 gen_int_mode (INTVAL (trueop1
),
2835 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2838 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2839 we might be able to further simplify the AND with X and potentially
2840 remove the truncation altogether. */
2841 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2843 rtx x
= XEXP (op0
, 0);
2844 machine_mode xmode
= GET_MODE (x
);
2845 tem
= simplify_gen_binary (AND
, xmode
, x
,
2846 gen_int_mode (INTVAL (trueop1
), xmode
));
2847 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2850 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2851 if (GET_CODE (op0
) == IOR
2852 && CONST_INT_P (trueop1
)
2853 && CONST_INT_P (XEXP (op0
, 1)))
2855 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2856 return simplify_gen_binary (IOR
, mode
,
2857 simplify_gen_binary (AND
, mode
,
2858 XEXP (op0
, 0), op1
),
2859 gen_int_mode (tmp
, mode
));
2862 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2863 insn (and may simplify more). */
2864 if (GET_CODE (op0
) == XOR
2865 && rtx_equal_p (XEXP (op0
, 0), op1
)
2866 && ! side_effects_p (op1
))
2867 return simplify_gen_binary (AND
, mode
,
2868 simplify_gen_unary (NOT
, mode
,
2869 XEXP (op0
, 1), mode
),
2872 if (GET_CODE (op0
) == XOR
2873 && rtx_equal_p (XEXP (op0
, 1), op1
)
2874 && ! side_effects_p (op1
))
2875 return simplify_gen_binary (AND
, mode
,
2876 simplify_gen_unary (NOT
, mode
,
2877 XEXP (op0
, 0), mode
),
2880 /* Similarly for (~(A ^ B)) & A. */
2881 if (GET_CODE (op0
) == NOT
2882 && GET_CODE (XEXP (op0
, 0)) == XOR
2883 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2884 && ! side_effects_p (op1
))
2885 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2887 if (GET_CODE (op0
) == NOT
2888 && GET_CODE (XEXP (op0
, 0)) == XOR
2889 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2890 && ! side_effects_p (op1
))
2891 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2893 /* Convert (A | B) & A to A. */
2894 if (GET_CODE (op0
) == IOR
2895 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2896 || rtx_equal_p (XEXP (op0
, 1), op1
))
2897 && ! side_effects_p (XEXP (op0
, 0))
2898 && ! side_effects_p (XEXP (op0
, 1)))
2901 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2902 ((A & N) + B) & M -> (A + B) & M
2903 Similarly if (N & M) == 0,
2904 ((A | N) + B) & M -> (A + B) & M
2905 and for - instead of + and/or ^ instead of |.
2906 Also, if (N & M) == 0, then
2907 (A +- N) & M -> A & M. */
2908 if (CONST_INT_P (trueop1
)
2909 && HWI_COMPUTABLE_MODE_P (mode
)
2910 && ~UINTVAL (trueop1
)
2911 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
2912 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2917 pmop
[0] = XEXP (op0
, 0);
2918 pmop
[1] = XEXP (op0
, 1);
2920 if (CONST_INT_P (pmop
[1])
2921 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
2922 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
2924 for (which
= 0; which
< 2; which
++)
2927 switch (GET_CODE (tem
))
2930 if (CONST_INT_P (XEXP (tem
, 1))
2931 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
2932 == UINTVAL (trueop1
))
2933 pmop
[which
] = XEXP (tem
, 0);
2937 if (CONST_INT_P (XEXP (tem
, 1))
2938 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
2939 pmop
[which
] = XEXP (tem
, 0);
2946 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
2948 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
2950 return simplify_gen_binary (code
, mode
, tem
, op1
);
2954 /* (and X (ior (not X) Y) -> (and X Y) */
2955 if (GET_CODE (op1
) == IOR
2956 && GET_CODE (XEXP (op1
, 0)) == NOT
2957 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
2958 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
2960 /* (and (ior (not X) Y) X) -> (and X Y) */
2961 if (GET_CODE (op0
) == IOR
2962 && GET_CODE (XEXP (op0
, 0)) == NOT
2963 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
2964 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
2966 /* (and X (ior Y (not X)) -> (and X Y) */
2967 if (GET_CODE (op1
) == IOR
2968 && GET_CODE (XEXP (op1
, 1)) == NOT
2969 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
2970 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
2972 /* (and (ior Y (not X)) X) -> (and X Y) */
2973 if (GET_CODE (op0
) == IOR
2974 && GET_CODE (XEXP (op0
, 1)) == NOT
2975 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
2976 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
2978 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2982 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
      if (tem)
	return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
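
      /* Unsigned division by a power of two is just a logical shift:
	 e.g. (udiv x (const_int 16)) becomes (lshiftrt x (const_int 4)),
	 because exact_log2 (16) == 4.  */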

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (tem)
		return tem;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (x)
		return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
      break;
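
      /* For an unsigned power-of-two modulus the mask is the value minus
	 one: (umod x (const_int 8)) becomes (and x (const_int 7)).  */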

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;

    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_PRECISION (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				    mode, op0,
				    GEN_INT (GET_MODE_PRECISION (mode)
					     - INTVAL (trueop1)));
#endif
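      /* E.g. in SImode (32-bit precision) a (rotatert x 25) is rewritten
	 as (rotate x 7), while a rotate by exactly half the width, 16,
	 is kept as a left rotation.  */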
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

      /* Given:
	 scalar modes M1, M2
	 scalar constants c1, c2
	 size (M2) > size (M1)
	 c1 == size (M2) - size (M1)
	 optimize:
	 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
				 <low_part>)
		      (const_int <c2>))
	 to:
	 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
		    <low_part>).  */
      if (code == ASHIFTRT
	  && !VECTOR_MODE_P (mode)
	  && GET_CODE (op0) == SUBREG
	  && CONST_INT_P (op1)
	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
	  && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
	      > GET_MODE_BITSIZE (mode))
	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
	      == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
		  - GET_MODE_BITSIZE (mode)))
	  && subreg_lowpart_p (op0))
	{
	  rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
			     + INTVAL (op1));
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	  tmp = simplify_gen_binary (ASHIFTRT,
				     GET_MODE (SUBREG_REG (op0)),
				     XEXP (SUBREG_REG (op0), 0),
				     tmp);
	  return simplify_gen_subreg (mode, tmp, inner_mode,
				      subreg_lowpart_offset (mode,
							     inner_mode));
	}

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT) width)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
<= HOST_BITS_PER_WIDE_INT
3231 && CONST_INT_P (trueop1
)
3232 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3233 && ! side_effects_p (op0
))
3235 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3237 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3243 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3245 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3247 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3253 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3255 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3257 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3270 /* ??? There are simplifications that can be done. */
3274 if (!VECTOR_MODE_P (mode
))
3276 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3277 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3278 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3279 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3280 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3282 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3283 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3286 /* Extract a scalar element from a nested VEC_SELECT expression
3287 (with optional nested VEC_CONCAT expression). Some targets
3288 (i386) extract scalar element from a vector using chain of
3289 nested VEC_SELECT expressions. When input operand is a memory
3290 operand, this operation can be simplified to a simple scalar
3291 load from an offseted memory address. */
3292 if (GET_CODE (trueop0
) == VEC_SELECT
)
3294 rtx op0
= XEXP (trueop0
, 0);
3295 rtx op1
= XEXP (trueop0
, 1);
3297 machine_mode opmode
= GET_MODE (op0
);
3298 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3299 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3301 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3307 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3308 gcc_assert (i
< n_elts
);
3310 /* Select element, pointed by nested selector. */
3311 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3313 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3314 if (GET_CODE (op0
) == VEC_CONCAT
)
3316 rtx op00
= XEXP (op0
, 0);
3317 rtx op01
= XEXP (op0
, 1);
3319 machine_mode mode00
, mode01
;
3320 int n_elts00
, n_elts01
;
3322 mode00
= GET_MODE (op00
);
3323 mode01
= GET_MODE (op01
);
3325 /* Find out number of elements of each operand. */
3326 if (VECTOR_MODE_P (mode00
))
3328 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3329 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3334 if (VECTOR_MODE_P (mode01
))
3336 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3337 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3342 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3344 /* Select correct operand of VEC_CONCAT
3345 and adjust selector. */
3346 if (elem
< n_elts01
)
3357 vec
= rtvec_alloc (1);
3358 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3360 tmp
= gen_rtx_fmt_ee (code
, mode
,
3361 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3364 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3365 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3366 return XEXP (trueop0
, 0);
3370 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3371 gcc_assert (GET_MODE_INNER (mode
)
3372 == GET_MODE_INNER (GET_MODE (trueop0
)));
3373 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3375 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3377 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3378 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3379 rtvec v
= rtvec_alloc (n_elts
);
3382 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3383 for (i
= 0; i
< n_elts
; i
++)
3385 rtx x
= XVECEXP (trueop1
, 0, i
);
3387 gcc_assert (CONST_INT_P (x
));
3388 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3392 return gen_rtx_CONST_VECTOR (mode
, v
);
3395 /* Recognize the identity. */
3396 if (GET_MODE (trueop0
) == mode
)
3398 bool maybe_ident
= true;
3399 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3401 rtx j
= XVECEXP (trueop1
, 0, i
);
3402 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3404 maybe_ident
= false;
3412 /* If we build {a,b} then permute it, build the result directly. */
3413 if (XVECLEN (trueop1
, 0) == 2
3414 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3415 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3416 && GET_CODE (trueop0
) == VEC_CONCAT
3417 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3418 && GET_MODE (XEXP (trueop0
, 0)) == mode
3419 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3420 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3422 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3423 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3426 gcc_assert (i0
< 4 && i1
< 4);
3427 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3428 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3430 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3433 if (XVECLEN (trueop1
, 0) == 2
3434 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3435 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3436 && GET_CODE (trueop0
) == VEC_CONCAT
3437 && GET_MODE (trueop0
) == mode
)
3439 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3440 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3443 gcc_assert (i0
< 2 && i1
< 2);
3444 subop0
= XEXP (trueop0
, i0
);
3445 subop1
= XEXP (trueop0
, i1
);
3447 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3450 /* If we select one half of a vec_concat, return that. */
3451 if (GET_CODE (trueop0
) == VEC_CONCAT
3452 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3454 rtx subop0
= XEXP (trueop0
, 0);
3455 rtx subop1
= XEXP (trueop0
, 1);
3456 machine_mode mode0
= GET_MODE (subop0
);
3457 machine_mode mode1
= GET_MODE (subop1
);
3458 int li
= GET_MODE_SIZE (GET_MODE_INNER (mode0
));
3459 int l0
= GET_MODE_SIZE (mode0
) / li
;
3460 int l1
= GET_MODE_SIZE (mode1
) / li
;
3461 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3462 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3464 bool success
= true;
3465 for (int i
= 1; i
< l0
; ++i
)
3467 rtx j
= XVECEXP (trueop1
, 0, i
);
3468 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3477 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3479 bool success
= true;
3480 for (int i
= 1; i
< l1
; ++i
)
3482 rtx j
= XVECEXP (trueop1
, 0, i
);
3483 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3495 if (XVECLEN (trueop1
, 0) == 1
3496 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3497 && GET_CODE (trueop0
) == VEC_CONCAT
)
3500 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3502 /* Try to find the element in the VEC_CONCAT. */
3503 while (GET_MODE (vec
) != mode
3504 && GET_CODE (vec
) == VEC_CONCAT
)
3506 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3507 if (offset
< vec_size
)
3508 vec
= XEXP (vec
, 0);
3512 vec
= XEXP (vec
, 1);
3514 vec
= avoid_constant_pool_reference (vec
);
3517 if (GET_MODE (vec
) == mode
)
3521 /* If we select elements in a vec_merge that all come from the same
3522 operand, select from that operand directly. */
3523 if (GET_CODE (op0
) == VEC_MERGE
)
3525 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3526 if (CONST_INT_P (trueop02
))
3528 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3529 bool all_operand0
= true;
3530 bool all_operand1
= true;
3531 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3533 rtx j
= XVECEXP (trueop1
, 0, i
);
3534 if (sel
& (1 << UINTVAL (j
)))
3535 all_operand1
= false;
3537 all_operand0
= false;
3539 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3540 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3541 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3542 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3546 /* If we have two nested selects that are inverses of each
3547 other, replace them with the source operand. */
3548 if (GET_CODE (trueop0
) == VEC_SELECT
3549 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3551 rtx op0_subop1
= XEXP (trueop0
, 1);
3552 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3553 gcc_assert (XVECLEN (trueop1
, 0) == GET_MODE_NUNITS (mode
));
3555 /* Apply the outer ordering vector to the inner one. (The inner
3556 ordering vector is expressly permitted to be of a different
3557 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3558 then the two VEC_SELECTs cancel. */
3559 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3561 rtx x
= XVECEXP (trueop1
, 0, i
);
3562 if (!CONST_INT_P (x
))
3564 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3565 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3568 return XEXP (trueop0
, 0);
3574 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3575 ? GET_MODE (trueop0
)
3576 : GET_MODE_INNER (mode
));
3577 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3578 ? GET_MODE (trueop1
)
3579 : GET_MODE_INNER (mode
));
3581 gcc_assert (VECTOR_MODE_P (mode
));
3582 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3583 == GET_MODE_SIZE (mode
));
3585 if (VECTOR_MODE_P (op0_mode
))
3586 gcc_assert (GET_MODE_INNER (mode
)
3587 == GET_MODE_INNER (op0_mode
));
3589 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3591 if (VECTOR_MODE_P (op1_mode
))
3592 gcc_assert (GET_MODE_INNER (mode
)
3593 == GET_MODE_INNER (op1_mode
));
3595 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3597 if ((GET_CODE (trueop0
) == CONST_VECTOR
3598 || CONST_SCALAR_INT_P (trueop0
)
3599 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3600 && (GET_CODE (trueop1
) == CONST_VECTOR
3601 || CONST_SCALAR_INT_P (trueop1
)
3602 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3604 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3605 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3606 rtvec v
= rtvec_alloc (n_elts
);
3608 unsigned in_n_elts
= 1;
3610 if (VECTOR_MODE_P (op0_mode
))
3611 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3612 for (i
= 0; i
< n_elts
; i
++)
3616 if (!VECTOR_MODE_P (op0_mode
))
3617 RTVEC_ELT (v
, i
) = trueop0
;
3619 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3623 if (!VECTOR_MODE_P (op1_mode
))
3624 RTVEC_ELT (v
, i
) = trueop1
;
3626 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3631 return gen_rtx_CONST_VECTOR (mode
, v
);
3634 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3635 Restrict the transformation to avoid generating a VEC_SELECT with a
3636 mode unrelated to its operand. */
3637 if (GET_CODE (trueop0
) == VEC_SELECT
3638 && GET_CODE (trueop1
) == VEC_SELECT
3639 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3640 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3642 rtx par0
= XEXP (trueop0
, 1);
3643 rtx par1
= XEXP (trueop1
, 1);
3644 int len0
= XVECLEN (par0
, 0);
3645 int len1
= XVECLEN (par1
, 0);
3646 rtvec vec
= rtvec_alloc (len0
+ len1
);
3647 for (int i
= 0; i
< len0
; i
++)
3648 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3649 for (int i
= 0; i
< len1
; i
++)
3650 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3651 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3652 gen_rtx_PARALLEL (VOIDmode
, vec
));
3665 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
3668 unsigned int width
= GET_MODE_PRECISION (mode
);
3670 if (VECTOR_MODE_P (mode
)
3671 && code
!= VEC_CONCAT
3672 && GET_CODE (op0
) == CONST_VECTOR
3673 && GET_CODE (op1
) == CONST_VECTOR
)
3675 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3676 machine_mode op0mode
= GET_MODE (op0
);
3677 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3678 machine_mode op1mode
= GET_MODE (op1
);
3679 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3680 rtvec v
= rtvec_alloc (n_elts
);
3683 gcc_assert (op0_n_elts
== n_elts
);
3684 gcc_assert (op1_n_elts
== n_elts
);
3685 for (i
= 0; i
< n_elts
; i
++)
3687 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3688 CONST_VECTOR_ELT (op0
, i
),
3689 CONST_VECTOR_ELT (op1
, i
));
3692 RTVEC_ELT (v
, i
) = x
;
3695 return gen_rtx_CONST_VECTOR (mode
, v
);
3698 if (VECTOR_MODE_P (mode
)
3699 && code
== VEC_CONCAT
3700 && (CONST_SCALAR_INT_P (op0
)
3701 || GET_CODE (op0
) == CONST_FIXED
3702 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3703 && (CONST_SCALAR_INT_P (op1
)
3704 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3705 || GET_CODE (op1
) == CONST_FIXED
))
3707 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3708 rtvec v
= rtvec_alloc (n_elts
);
3710 gcc_assert (n_elts
>= 2);
3713 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3714 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3716 RTVEC_ELT (v
, 0) = op0
;
3717 RTVEC_ELT (v
, 1) = op1
;
3721 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3722 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3725 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3726 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3727 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3729 for (i
= 0; i
< op0_n_elts
; ++i
)
3730 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3731 for (i
= 0; i
< op1_n_elts
; ++i
)
3732 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3735 return gen_rtx_CONST_VECTOR (mode
, v
);
3738 if (SCALAR_FLOAT_MODE_P (mode
)
3739 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3740 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3741 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3752 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3754 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3756 for (i
= 0; i
< 4; i
++)
3773 real_from_target (&r
, tmp0
, mode
);
3774 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3778 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3781 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3782 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3783 real_convert (&f0
, mode
, &f0
);
3784 real_convert (&f1
, mode
, &f1
);
3786 if (HONOR_SNANS (mode
)
3787 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3791 && REAL_VALUES_EQUAL (f1
, dconst0
)
3792 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3795 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3796 && flag_trapping_math
3797 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3799 int s0
= REAL_VALUE_NEGATIVE (f0
);
3800 int s1
= REAL_VALUE_NEGATIVE (f1
);
3805 /* Inf + -Inf = NaN plus exception. */
3810 /* Inf - Inf = NaN plus exception. */
3815 /* Inf / Inf = NaN plus exception. */
3822 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3823 && flag_trapping_math
3824 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3825 || (REAL_VALUE_ISINF (f1
)
3826 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3827 /* Inf * 0 = NaN plus exception. */
3830 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3832 real_convert (&result
, mode
, &value
);
3834 /* Don't constant fold this floating point operation if
3835 the result has overflowed and flag_trapping_math. */
3837 if (flag_trapping_math
3838 && MODE_HAS_INFINITIES (mode
)
3839 && REAL_VALUE_ISINF (result
)
3840 && !REAL_VALUE_ISINF (f0
)
3841 && !REAL_VALUE_ISINF (f1
))
3842 /* Overflow plus exception. */
3845 /* Don't constant fold this floating point operation if the
3846 result may dependent upon the run-time rounding mode and
3847 flag_rounding_math is set, or if GCC's software emulation
3848 is unable to accurately represent the result. */
3850 if ((flag_rounding_math
3851 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3852 && (inexact
|| !real_identical (&result
, &value
)))
3855 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3859 /* We can fold some multi-word operations. */
3860 if ((GET_MODE_CLASS (mode
) == MODE_INT
3861 || GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
3862 && CONST_SCALAR_INT_P (op0
)
3863 && CONST_SCALAR_INT_P (op1
))
3867 rtx_mode_t pop0
= std::make_pair (op0
, mode
);
3868 rtx_mode_t pop1
= std::make_pair (op1
, mode
);
3870 #if TARGET_SUPPORTS_WIDE_INT == 0
3871 /* This assert keeps the simplification from producing a result
3872 that cannot be represented in a CONST_DOUBLE but a lot of
3873 upstream callers expect that this function never fails to
3874 simplify something and so you if you added this to the test
3875 above the code would die later anyway. If this assert
3876 happens, you just need to make the port support wide int. */
3877 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
3882 result
= wi::sub (pop0
, pop1
);
3886 result
= wi::add (pop0
, pop1
);
3890 result
= wi::mul (pop0
, pop1
);
3894 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3900 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3906 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3912 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3918 result
= wi::bit_and (pop0
, pop1
);
3922 result
= wi::bit_or (pop0
, pop1
);
3926 result
= wi::bit_xor (pop0
, pop1
);
3930 result
= wi::smin (pop0
, pop1
);
3934 result
= wi::smax (pop0
, pop1
);
3938 result
= wi::umin (pop0
, pop1
);
3942 result
= wi::umax (pop0
, pop1
);
3949 wide_int wop1
= pop1
;
3950 if (SHIFT_COUNT_TRUNCATED
)
3951 wop1
= wi::umod_trunc (wop1
, width
);
3952 else if (wi::geu_p (wop1
, width
))
3958 result
= wi::lrshift (pop0
, wop1
);
3962 result
= wi::arshift (pop0
, wop1
);
3966 result
= wi::lshift (pop0
, wop1
);
3977 if (wi::neg_p (pop1
))
3983 result
= wi::lrotate (pop0
, pop1
);
3987 result
= wi::rrotate (pop0
, pop1
);
3998 return immed_wide_int_const (result
, mode
);
    }

  return NULL_RTX;
}

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
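
/* For example, (plus (minus a b) (minus b a)) is flattened into the signed
   operand list {+a, -b, +b, -a}; the pairwise simplification loop below
   cancels both pairs and the whole expression folds to zero.  */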

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}

static rtx
4037 simplify_plus_minus (enum rtx_code code
, machine_mode mode
, rtx op0
,
4040 struct simplify_plus_minus_op_data ops
[16];
4043 int changed
, n_constants
, canonicalized
= 0;
4046 memset (ops
, 0, sizeof ops
);
4048 /* Set up the two operands and then expand them until nothing has been
4049 changed. If we run out of room in our array, give up; this should
4050 almost never happen. */
4055 ops
[1].neg
= (code
== MINUS
);
4062 for (i
= 0; i
< n_ops
; i
++)
4064 rtx this_op
= ops
[i
].op
;
4065 int this_neg
= ops
[i
].neg
;
4066 enum rtx_code this_code
= GET_CODE (this_op
);
4072 if (n_ops
== ARRAY_SIZE (ops
))
4075 ops
[n_ops
].op
= XEXP (this_op
, 1);
4076 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4079 ops
[i
].op
= XEXP (this_op
, 0);
4081 canonicalized
|= this_neg
|| i
!= n_ops
- 2;
4085 ops
[i
].op
= XEXP (this_op
, 0);
4086 ops
[i
].neg
= ! this_neg
;
4092 if (n_ops
!= ARRAY_SIZE (ops
)
4093 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4094 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4095 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4097 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4098 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4099 ops
[n_ops
].neg
= this_neg
;
	  /* ~a -> (-a - 1) */
	  if (n_ops != ARRAY_SIZE (ops))
	      ops[n_ops].op = CONSTM1_RTX (mode);
	      ops[n_ops++].neg = this_neg;
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = !this_neg;

	  ops[i].op = neg_const_int (mode, this_op);
  if (n_constants > 1)

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	  lhs = gen_rtx_NEG (mode, ops[0].op);
      else if (ops[0].neg)

      return simplify_const_binary_operation (code, mode, lhs, rhs);
  /* Now simplify each pair of operands until nothing changes.  */

      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	  struct simplify_plus_minus_op_data save;

	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))

	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
		enum rtx_code ncode = PLUS;

		  tem = lhs, lhs = rhs, rhs = tem;
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode,
						     tem_lhs, tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);

		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.
		   Also, if we find such a simplification, don't try
		   any more combinations with this rhs: we must have
		   something like symbol+offset, i.e. one of the
		   trivial CONST expressions we handle later.  */
		if (GET_CODE (tem) == CONST
		    && GET_CODE (XEXP (tem, 0)) == ncode
		    && XEXP (XEXP (tem, 0), 0) == lhs
		    && XEXP (XEXP (tem, 0), 1) == rhs)

		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[j].op = NULL_RTX;
	/* If nothing changed, fail.  */

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)

      ops[0].op = gen_rtx_NEG (mode, ops[0].op);

  /* Now make the result by performing the requested operations.  */
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */

plus_minus_operand_p (const_rtx x)
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
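/* For example, (plus (reg) (const_int 4)), (minus (reg) (reg)) and
   (const (plus (symbol_ref "x") (const_int 8))) all qualify, while a bare
   (reg) or (const_int 4) does not, so callers only fall back to the
   expensive simplify_plus_minus expansion when at least one operand can
   actually be flattened.  */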
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands must
   not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
4337 simplify_relational_operation (enum rtx_code code
, machine_mode mode
,
4338 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4340 rtx tem
, trueop0
, trueop1
;
4342 if (cmp_mode
== VOIDmode
)
4343 cmp_mode
= GET_MODE (op0
);
4344 if (cmp_mode
== VOIDmode
)
4345 cmp_mode
= GET_MODE (op1
);
4347 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4350 if (SCALAR_FLOAT_MODE_P (mode
))
4352 if (tem
== const0_rtx
)
4353 return CONST0_RTX (mode
);
4354 #ifdef FLOAT_STORE_FLAG_VALUE
4356 REAL_VALUE_TYPE val
;
4357 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4358 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
4364 if (VECTOR_MODE_P (mode
))
4366 if (tem
== const0_rtx
)
4367 return CONST0_RTX (mode
);
4368 #ifdef VECTOR_STORE_FLAG_VALUE
4373 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4374 if (val
== NULL_RTX
)
4376 if (val
== const1_rtx
)
4377 return CONST1_RTX (mode
);
4379 units
= GET_MODE_NUNITS (mode
);
4380 v
= rtvec_alloc (units
);
4381 for (i
= 0; i
< units
; i
++)
4382 RTVEC_ELT (v
, i
) = val
;
4383 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
4393 /* For the following tests, ensure const0_rtx is op1. */
4394 if (swap_commutative_operands_p (op0
, op1
)
4395 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4396 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
4398 /* If op0 is a compare, extract the comparison arguments from it. */
4399 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4400 return simplify_gen_relational (code
, mode
, VOIDmode
,
4401 XEXP (op0
, 0), XEXP (op0
, 1));
4403 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4407 trueop0
= avoid_constant_pool_reference (op0
);
4408 trueop1
= avoid_constant_pool_reference (op1
);
4409 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
4420 simplify_relational_operation_1 (enum rtx_code code
, machine_mode mode
,
4421 machine_mode cmp_mode
, rtx op0
, rtx op1
)
4423 enum rtx_code op0code
= GET_CODE (op0
);
4425 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4427 /* If op0 is a comparison, extract the comparison arguments
4431 if (GET_MODE (op0
) == mode
)
4432 return simplify_rtx (op0
);
4434 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4435 XEXP (op0
, 0), XEXP (op0
, 1));
4437 else if (code
== EQ
)
4439 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
4440 if (new_code
!= UNKNOWN
)
4441 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4442 XEXP (op0
, 0), XEXP (op0
, 1));
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
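      /* For example, (ltu:SI (plus:SI x (const_int 4)) (const_int 4))
	 becomes (geu:SI x (const_int -4)); both forms test whether the
	 addition x + 4 wrapped around, but the second needs no addition.  */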
4462 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4463 if ((code
== LTU
|| code
== GEU
)
4464 && GET_CODE (op0
) == PLUS
4465 && rtx_equal_p (op1
, XEXP (op0
, 1))
4466 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4467 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4468 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4469 copy_rtx (XEXP (op0
, 0)));
4471 if (op1
== const0_rtx
)
4473 /* Canonicalize (GTU x 0) as (NE x 0). */
4475 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4476 /* Canonicalize (LEU x 0) as (EQ x 0). */
4478 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4480 else if (op1
== const1_rtx
)
4485 /* Canonicalize (GE x 1) as (GT x 0). */
4486 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4489 /* Canonicalize (GEU x 1) as (NE x 0). */
4490 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4493 /* Canonicalize (LT x 1) as (LE x 0). */
4494 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4497 /* Canonicalize (LTU x 1) as (EQ x 0). */
4498 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4504 else if (op1
== constm1_rtx
)
4506 /* Canonicalize (LE x -1) as (LT x 0). */
4508 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4509 /* Canonicalize (GT x -1) as (GE x 0). */
4511 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && rtx_equal_p (c, XEXP (tem, 1)))

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
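      /* For example, (eq (plus x (const_int 3)) (const_int 10)) becomes
	 (eq x (const_int 7)), because 10 - 3 folds to a constant.  When the
	 constants are symbolic and the subtraction does not fold, the check
	 above stops the transformation so we do not flip back and forth
	 between the two equivalent forms forever.  */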
4539 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4540 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4542 && op1
== const0_rtx
4543 && GET_MODE_CLASS (mode
) == MODE_INT
4544 && cmp_mode
!= VOIDmode
4545 /* ??? Work-around BImode bugs in the ia64 backend. */
4547 && cmp_mode
!= BImode
4548 && nonzero_bits (op0
, cmp_mode
) == 1
4549 && STORE_FLAG_VALUE
== 1)
4550 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
4551 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
4552 : lowpart_subreg (mode
, op0
, cmp_mode
);
4554 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4555 if ((code
== EQ
|| code
== NE
)
4556 && op1
== const0_rtx
4558 return simplify_gen_relational (code
, mode
, cmp_mode
,
4559 XEXP (op0
, 0), XEXP (op0
, 1));
4561 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4562 if ((code
== EQ
|| code
== NE
)
4564 && rtx_equal_p (XEXP (op0
, 0), op1
)
4565 && !side_effects_p (XEXP (op0
, 0)))
4566 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 1),
4569 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4570 if ((code
== EQ
|| code
== NE
)
4572 && rtx_equal_p (XEXP (op0
, 1), op1
)
4573 && !side_effects_p (XEXP (op0
, 1)))
4574 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4577 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4578 if ((code
== EQ
|| code
== NE
)
4580 && CONST_SCALAR_INT_P (op1
)
4581 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
4582 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4583 simplify_gen_binary (XOR
, cmp_mode
,
4584 XEXP (op0
, 1), op1
));
  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
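      /* For example, (eq (and x y) x) becomes
	 (eq (and (not y) x) (const_int 0)): x equals x & y exactly when x
	 has no bits set outside y, and the rewritten form exposes the
	 and-with-complement operation to targets that have it.  */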
4602 /* Likewise for (eq/ne (and x y) y). */
4603 if ((code
== EQ
|| code
== NE
)
4605 && rtx_equal_p (XEXP (op0
, 1), op1
)
4606 && !side_effects_p (op1
)
4607 && op1
!= CONST0_RTX (cmp_mode
))
4609 rtx not_x
= simplify_gen_unary (NOT
, cmp_mode
, XEXP (op0
, 0), cmp_mode
);
4610 rtx lhs
= simplify_gen_binary (AND
, cmp_mode
, not_x
, XEXP (op0
, 1));
4612 return simplify_gen_relational (code
, mode
, cmp_mode
, lhs
,
4613 CONST0_RTX (cmp_mode
));
4616 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4617 if ((code
== EQ
|| code
== NE
)
4618 && GET_CODE (op0
) == BSWAP
4619 && CONST_SCALAR_INT_P (op1
))
4620 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4621 simplify_gen_unary (BSWAP
, cmp_mode
,
4624 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4625 if ((code
== EQ
|| code
== NE
)
4626 && GET_CODE (op0
) == BSWAP
4627 && GET_CODE (op1
) == BSWAP
)
4628 return simplify_gen_relational (code
, mode
, cmp_mode
,
4629 XEXP (op0
, 0), XEXP (op1
, 0));
4631 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
4637 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4638 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
4639 XEXP (op0
, 0), const0_rtx
);
4644 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4645 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
4646 XEXP (op0
, 0), const0_rtx
);
4665 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4666 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4667 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4668 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4669 For floating-point comparisons, assume that the operands were ordered. */
4672 comparison_result (enum rtx_code code
, int known_results
)
4678 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
4681 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
4685 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4688 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4692 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4695 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
4698 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
4700 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
4703 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
4705 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
4708 return const_true_rtx
;
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */
4723 simplify_const_relational_operation (enum rtx_code code
,
4731 gcc_assert (mode
!= VOIDmode
4732 || (GET_MODE (op0
) == VOIDmode
4733 && GET_MODE (op1
) == VOIDmode
));
4735 /* If op0 is a compare, extract the comparison arguments from it. */
4736 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4738 op1
= XEXP (op0
, 1);
4739 op0
= XEXP (op0
, 0);
4741 if (GET_MODE (op0
) != VOIDmode
)
4742 mode
= GET_MODE (op0
);
4743 else if (GET_MODE (op1
) != VOIDmode
)
4744 mode
= GET_MODE (op1
);
4749 /* We can't simplify MODE_CC values since we don't know what the
4750 actual comparison is. */
4751 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4754 /* Make sure the constant is second. */
4755 if (swap_commutative_operands_p (op0
, op1
))
4757 tem
= op0
, op0
= op1
, op1
= tem
;
4758 code
= swap_condition (code
);
4761 trueop0
= avoid_constant_pool_reference (op0
);
4762 trueop1
= avoid_constant_pool_reference (op1
);
4764 /* For integer comparisons of A and B maybe we can simplify A - B and can
4765 then simplify a comparison of that with zero. If A and B are both either
4766 a register or a CONST_INT, this can't help; testing for these cases will
4767 prevent infinite recursion here and speed things up.
4769 We can only do this for EQ and NE comparisons as otherwise we may
4770 lose or introduce overflow which we cannot disregard as undefined as
4771 we do not know the signedness of the operation on either the left or
4772 the right hand side of the comparison. */
4774 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4775 && (code
== EQ
|| code
== NE
)
4776 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
4777 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
4778 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4779 /* We cannot do this if tem is a nonzero address. */
4780 && ! nonzero_address_p (tem
))
4781 return simplify_const_relational_operation (signed_condition (code
),
4782 mode
, tem
, const0_rtx
);
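  /* For example, (eq (plus (reg) (const_int 8)) (reg)) is handled by
     simplifying the MINUS of the two sides down to (const_int 8); the
     recursive call then folds the comparison of that result with zero
     to const0_rtx.  */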
4784 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4785 return const_true_rtx
;
4787 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4790 /* For modes without NaNs, if the two operands are equal, we know the
4791 result except if they have side-effects. Even with NaNs we know
4792 the result of unordered comparisons and, if signaling NaNs are
4793 irrelevant, also the result of LT/GT/LTGT. */
4794 if ((! HONOR_NANS (trueop0
)
4795 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4796 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4797 && ! HONOR_SNANS (trueop0
)))
4798 && rtx_equal_p (trueop0
, trueop1
)
4799 && ! side_effects_p (trueop0
))
4800 return comparison_result (code
, CMP_EQ
);
4802 /* If the operands are floating-point constants, see if we can fold
4804 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
4805 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
4806 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4808 REAL_VALUE_TYPE d0
, d1
;
4810 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
4811 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
4813 /* Comparisons are unordered iff at least one of the values is NaN. */
4814 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
4824 return const_true_rtx
;
4837 return comparison_result (code
,
4838 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
4839 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
4842 /* Otherwise, see if the operands are both integers. */
4843 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4844 && CONST_SCALAR_INT_P (trueop0
) && CONST_SCALAR_INT_P (trueop1
))
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4853 if (wi::eq_p (ptrueop0
, ptrueop1
))
4854 return comparison_result (code
, CMP_EQ
);
4857 int cr
= wi::lts_p (ptrueop0
, ptrueop1
) ? CMP_LT
: CMP_GT
;
4858 cr
|= wi::ltu_p (ptrueop0
, ptrueop1
) ? CMP_LTU
: CMP_GTU
;
4859 return comparison_result (code
, cr
);
4863 /* Optimize comparisons with upper and lower bounds. */
4864 if (HWI_COMPUTABLE_MODE_P (mode
)
4865 && CONST_INT_P (trueop1
))
4868 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, mode
);
4869 HOST_WIDE_INT val
= INTVAL (trueop1
);
4870 HOST_WIDE_INT mmin
, mmax
;
4880 /* Get a reduced range if the sign bit is zero. */
4881 if (nonzero
<= (GET_MODE_MASK (mode
) >> 1))
4888 rtx mmin_rtx
, mmax_rtx
;
4889 get_mode_bounds (mode
, sign
, mode
, &mmin_rtx
, &mmax_rtx
);
4891 mmin
= INTVAL (mmin_rtx
);
4892 mmax
= INTVAL (mmax_rtx
);
4895 unsigned int sign_copies
= num_sign_bit_copies (trueop0
, mode
);
4897 mmin
>>= (sign_copies
- 1);
4898 mmax
>>= (sign_copies
- 1);
4904 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4906 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4907 return const_true_rtx
;
4908 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4913 return const_true_rtx
;
4918 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4920 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4921 return const_true_rtx
;
4922 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4927 return const_true_rtx
;
4933 /* x == y is always false for y out of range. */
4934 if (val
< mmin
|| val
> mmax
)
4938 /* x > y is always false for y >= mmax, always true for y < mmin. */
4940 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4942 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4943 return const_true_rtx
;
4949 return const_true_rtx
;
4952 /* x < y is always false for y <= mmin, always true for y > mmax. */
4954 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4956 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4957 return const_true_rtx
;
4963 return const_true_rtx
;
4967 /* x != y is always true for y out of range. */
4968 if (val
< mmin
|| val
> mmax
)
4969 return const_true_rtx
;
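      /* For example, if trueop0 is (zero_extend:SI (reg:QI x)), nonzero_bits
	 narrows its range to [0, 255], so (gtu ... (const_int 255)) folds to
	 const0_rtx and (leu ... (const_int 255)) folds to const_true_rtx
	 without knowing anything about x itself.  */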
4977 /* Optimize integer comparisons with zero. */
4978 if (trueop1
== const0_rtx
)
4980 /* Some addresses are known to be nonzero. We don't know
4981 their sign, but equality comparisons are known. */
4982 if (nonzero_address_p (trueop0
))
4984 if (code
== EQ
|| code
== LEU
)
4986 if (code
== NE
|| code
== GTU
)
4987 return const_true_rtx
;
4990 /* See if the first operand is an IOR with a constant. If so, we
4991 may be able to determine the result of this comparison. */
4992 if (GET_CODE (op0
) == IOR
)
4994 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
4995 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
4997 int sign_bitnum
= GET_MODE_PRECISION (mode
) - 1;
4998 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
4999 && (UINTVAL (inner_const
)
5000 & ((unsigned HOST_WIDE_INT
) 1
5010 return const_true_rtx
;
5014 return const_true_rtx
;
5028 /* Optimize comparison of ABS with zero. */
5029 if (trueop1
== CONST0_RTX (mode
)
5030 && (GET_CODE (trueop0
) == ABS
5031 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5032 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5037 /* Optimize abs(x) < 0.0. */
5038 if (!HONOR_SNANS (mode
)
5039 && (!INTEGRAL_MODE_P (mode
)
5040 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
5042 if (INTEGRAL_MODE_P (mode
)
5043 && (issue_strict_overflow_warning
5044 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
5045 warning (OPT_Wstrict_overflow
,
5046 ("assuming signed overflow does not occur when "
5047 "assuming abs (x) < 0 is false"));
5053 /* Optimize abs(x) >= 0.0. */
5054 if (!HONOR_NANS (mode
)
5055 && (!INTEGRAL_MODE_P (mode
)
5056 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
5058 if (INTEGRAL_MODE_P (mode
)
5059 && (issue_strict_overflow_warning
5060 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
5061 warning (OPT_Wstrict_overflow
,
5062 ("assuming signed overflow does not occur when "
5063 "assuming abs (x) >= 0 is true"));
5064 return const_true_rtx
;
5069 /* Optimize ! (abs(x) < 0.0). */
5070 return const_true_rtx
;
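  /* For example, (ge (abs:SF x) (const_double 0.0)) folds to const_true_rtx
     when NaNs are not honored, and (lt (abs:SF x) (const_double 0.0)) folds
     to false when signaling NaNs are not honored; the integer variants also
     require signed overflow to be undefined, since abs (INT_MIN) is still
     negative under wrapping arithmetic.  */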
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
5085 simplify_ternary_operation (enum rtx_code code
, machine_mode mode
,
5086 machine_mode op0_mode
, rtx op0
, rtx op1
,
5089 unsigned int width
= GET_MODE_PRECISION (mode
);
5090 bool any_change
= false;
5093 /* VOIDmode means "infinite" precision. */
5095 width
= HOST_BITS_PER_WIDE_INT
;
5100 /* Simplify negations around the multiplication. */
5101 /* -a * -b + c => a * b + c. */
5102 if (GET_CODE (op0
) == NEG
)
5104 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5106 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5108 else if (GET_CODE (op1
) == NEG
)
5110 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5112 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5115 /* Canonicalize the two multiplication operands. */
5116 /* a * -b + c => -b * a + c. */
5117 if (swap_commutative_operands_p (op0
, op1
))
5118 tem
= op0
, op0
= op1
, op1
= tem
, any_change
= true;
5121 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5126 if (CONST_INT_P (op0
)
5127 && CONST_INT_P (op1
)
5128 && CONST_INT_P (op2
)
5129 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
5130 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
5132 /* Extracting a bit-field from a constant */
5133 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5134 HOST_WIDE_INT op1val
= INTVAL (op1
);
5135 HOST_WIDE_INT op2val
= INTVAL (op2
);
5136 if (BITS_BIG_ENDIAN
)
5137 val
>>= GET_MODE_PRECISION (op0_mode
) - op2val
- op1val
;
5141 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5143 /* First zero-extend. */
5144 val
&= ((unsigned HOST_WIDE_INT
) 1 << op1val
) - 1;
5145 /* If desired, propagate sign bit. */
5146 if (code
== SIGN_EXTRACT
5147 && (val
& ((unsigned HOST_WIDE_INT
) 1 << (op1val
- 1)))
5149 val
|= ~ (((unsigned HOST_WIDE_INT
) 1 << op1val
) - 1);
5152 return gen_int_mode (val
, mode
);
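	  /* For example, with BITS_BIG_ENDIAN == 0,
	     (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
	     extracts the four bits above the low nibble and folds to
	     (const_int 0xa), while the corresponding sign_extract folds to
	     (const_int -6) because the top bit of the extracted field is
	     set.  */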
5157 if (CONST_INT_P (op0
))
5158 return op0
!= const0_rtx
? op1
: op2
;
5160 /* Convert c ? a : a into "a". */
5161 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5164 /* Convert a != b ? a : b into "a". */
5165 if (GET_CODE (op0
) == NE
5166 && ! side_effects_p (op0
)
5167 && ! HONOR_NANS (mode
)
5168 && ! HONOR_SIGNED_ZEROS (mode
)
5169 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5170 && rtx_equal_p (XEXP (op0
, 1), op2
))
5171 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5172 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5175 /* Convert a == b ? a : b into "b". */
5176 if (GET_CODE (op0
) == EQ
5177 && ! side_effects_p (op0
)
5178 && ! HONOR_NANS (mode
)
5179 && ! HONOR_SIGNED_ZEROS (mode
)
5180 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5181 && rtx_equal_p (XEXP (op0
, 1), op2
))
5182 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5183 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5186 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5188 machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5189 ? GET_MODE (XEXP (op0
, 1))
5190 : GET_MODE (XEXP (op0
, 0)));
5193 /* Look for happy constants in op1 and op2. */
5194 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5196 HOST_WIDE_INT t
= INTVAL (op1
);
5197 HOST_WIDE_INT f
= INTVAL (op2
);
5199 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5200 code
= GET_CODE (op0
);
5201 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5204 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
5212 return simplify_gen_relational (code
, mode
, cmp_mode
,
5213 XEXP (op0
, 0), XEXP (op0
, 1));
5216 if (cmp_mode
== VOIDmode
)
5217 cmp_mode
= op0_mode
;
5218 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5219 cmp_mode
, XEXP (op0
, 0),
5222 /* See if any simplifications were possible. */
5225 if (CONST_INT_P (temp
))
5226 return temp
== const0_rtx
? op2
: op1
;
5228 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5234 gcc_assert (GET_MODE (op0
) == mode
);
5235 gcc_assert (GET_MODE (op1
) == mode
);
5236 gcc_assert (VECTOR_MODE_P (mode
));
5237 trueop2
= avoid_constant_pool_reference (op2
);
5238 if (CONST_INT_P (trueop2
))
5240 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
5241 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
5242 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop2
);
5243 unsigned HOST_WIDE_INT mask
;
5244 if (n_elts
== HOST_BITS_PER_WIDE_INT
)
5247 mask
= ((unsigned HOST_WIDE_INT
) 1 << n_elts
) - 1;
5249 if (!(sel
& mask
) && !side_effects_p (op0
))
5251 if ((sel
& mask
) == mask
&& !side_effects_p (op1
))
5254 rtx trueop0
= avoid_constant_pool_reference (op0
);
5255 rtx trueop1
= avoid_constant_pool_reference (op1
);
5256 if (GET_CODE (trueop0
) == CONST_VECTOR
5257 && GET_CODE (trueop1
) == CONST_VECTOR
)
5259 rtvec v
= rtvec_alloc (n_elts
);
5262 for (i
= 0; i
< n_elts
; i
++)
5263 RTVEC_ELT (v
, i
) = ((sel
& ((unsigned HOST_WIDE_INT
) 1 << i
))
5264 ? CONST_VECTOR_ELT (trueop0
, i
)
5265 : CONST_VECTOR_ELT (trueop1
, i
));
5266 return gen_rtx_CONST_VECTOR (mode
, v
);
5269 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5270 if no element from a appears in the result. */
5271 if (GET_CODE (op0
) == VEC_MERGE
)
5273 tem
= avoid_constant_pool_reference (XEXP (op0
, 2));
5274 if (CONST_INT_P (tem
))
5276 unsigned HOST_WIDE_INT sel0
= UINTVAL (tem
);
5277 if (!(sel
& sel0
& mask
) && !side_effects_p (XEXP (op0
, 0)))
5278 return simplify_gen_ternary (code
, mode
, mode
,
5279 XEXP (op0
, 1), op1
, op2
);
5280 if (!(sel
& ~sel0
& mask
) && !side_effects_p (XEXP (op0
, 1)))
5281 return simplify_gen_ternary (code
, mode
, mode
,
5282 XEXP (op0
, 0), op1
, op2
);
5285 if (GET_CODE (op1
) == VEC_MERGE
)
5287 tem
= avoid_constant_pool_reference (XEXP (op1
, 2));
5288 if (CONST_INT_P (tem
))
5290 unsigned HOST_WIDE_INT sel1
= UINTVAL (tem
);
5291 if (!(~sel
& sel1
& mask
) && !side_effects_p (XEXP (op1
, 0)))
5292 return simplify_gen_ternary (code
, mode
, mode
,
5293 op0
, XEXP (op1
, 1), op2
);
5294 if (!(~sel
& ~sel1
& mask
) && !side_effects_p (XEXP (op1
, 1)))
5295 return simplify_gen_ternary (code
, mode
, mode
,
5296 op0
, XEXP (op1
, 0), op2
);
5300 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5302 if (GET_CODE (op0
) == VEC_DUPLICATE
5303 && GET_CODE (XEXP (op0
, 0)) == VEC_SELECT
5304 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == PARALLEL
5305 && mode_nunits
[GET_MODE (XEXP (op0
, 0))] == 1)
5307 tem
= XVECEXP ((XEXP (XEXP (op0
, 0), 1)), 0, 0);
5308 if (CONST_INT_P (tem
) && CONST_INT_P (op2
))
5310 if (XEXP (XEXP (op0
, 0), 0) == op1
5311 && UINTVAL (op2
) == HOST_WIDE_INT_1U
<< UINTVAL (tem
))
5317 if (rtx_equal_p (op0
, op1
)
5318 && !side_effects_p (op2
) && !side_effects_p (op1
))
5330 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5331 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5332 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5334 Works by unpacking OP into a collection of 8-bit values
5335 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5336 and then repacking them again for OUTERMODE. */
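/* For example, on a little-endian 32-bit target,
   (subreg:HI (const_int 0x12345678) 2) unpacks the SImode constant into
   the byte array {0x78, 0x56, 0x34, 0x12}, selects the two bytes starting
   at offset 2 and repacks them as (const_int 0x1234).  */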
5339 simplify_immed_subreg (machine_mode outermode
, rtx op
,
5340 machine_mode innermode
, unsigned int byte
)
5344 value_mask
= (1 << value_bit
) - 1
5346 unsigned char value
[MAX_BITSIZE_MODE_ANY_MODE
/ value_bit
];
5355 rtvec result_v
= NULL
;
5356 enum mode_class outer_class
;
5357 machine_mode outer_submode
;
5360 /* Some ports misuse CCmode. */
5361 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
5364 /* We have no way to represent a complex constant at the rtl level. */
5365 if (COMPLEX_MODE_P (outermode
))
5368 /* We support any size mode. */
5369 max_bitsize
= MAX (GET_MODE_BITSIZE (outermode
),
5370 GET_MODE_BITSIZE (innermode
));
5372 /* Unpack the value. */
5374 if (GET_CODE (op
) == CONST_VECTOR
)
5376 num_elem
= CONST_VECTOR_NUNITS (op
);
5377 elems
= &CONST_VECTOR_ELT (op
, 0);
5378 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
5384 elem_bitsize
= max_bitsize
;
5386 /* If this asserts, it is too complicated; reducing value_bit may help. */
5387 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
5388 /* I don't know how to handle endianness of sub-units. */
5389 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
5391 for (elem
= 0; elem
< num_elem
; elem
++)
5394 rtx el
= elems
[elem
];
5396 /* Vectors are kept in target memory order. (This is probably
5399 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5400 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5402 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5403 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5404 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5405 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5406 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5409 switch (GET_CODE (el
))
5413 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5415 *vp
++ = INTVAL (el
) >> i
;
5416 /* CONST_INTs are always logically sign-extended. */
5417 for (; i
< elem_bitsize
; i
+= value_bit
)
5418 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
5421 case CONST_WIDE_INT
:
5423 rtx_mode_t val
= std::make_pair (el
, innermode
);
5424 unsigned char extend
= wi::sign_mask (val
);
5426 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5427 *vp
++ = wi::extract_uhwi (val
, i
, value_bit
);
5428 for (; i
< elem_bitsize
; i
+= value_bit
)
5434 if (TARGET_SUPPORTS_WIDE_INT
== 0 && GET_MODE (el
) == VOIDmode
)
5436 unsigned char extend
= 0;
5437 /* If this triggers, someone should have generated a
5438 CONST_INT instead. */
5439 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
5441 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5442 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
5443 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
5446 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
5450 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
5452 for (; i
< elem_bitsize
; i
+= value_bit
)
5457 /* This is big enough for anything on the platform. */
5458 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
5459 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
5461 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
5462 gcc_assert (bitsize
<= elem_bitsize
);
5463 gcc_assert (bitsize
% value_bit
== 0);
5465 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
5468 /* real_to_target produces its result in words affected by
5469 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5470 and use WORDS_BIG_ENDIAN instead; see the documentation
5471 of SUBREG in rtl.texi. */
5472 for (i
= 0; i
< bitsize
; i
+= value_bit
)
5475 if (WORDS_BIG_ENDIAN
)
5476 ibase
= bitsize
- 1 - i
;
5479 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
5482 /* It shouldn't matter what's done here, so fill it with
5484 for (; i
< elem_bitsize
; i
+= value_bit
)
5490 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5492 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5493 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5497 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5498 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5499 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
5501 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
5502 >> (i
- HOST_BITS_PER_WIDE_INT
);
5503 for (; i
< elem_bitsize
; i
+= value_bit
)
5513 /* Now, pick the right byte to start with. */
5514 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5515 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5516 will already have offset 0. */
5517 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
5519 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
5521 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5522 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5523 byte
= (subword_byte
% UNITS_PER_WORD
5524 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5527 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5528 so if it's become negative it will instead be very large.) */
5529 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5531 /* Convert from bytes to chunks of size value_bit. */
5532 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
5534 /* Re-pack the value. */
5536 if (VECTOR_MODE_P (outermode
))
5538 num_elem
= GET_MODE_NUNITS (outermode
);
5539 result_v
= rtvec_alloc (num_elem
);
5540 elems
= &RTVEC_ELT (result_v
, 0);
5541 outer_submode
= GET_MODE_INNER (outermode
);
5547 outer_submode
= outermode
;
5550 outer_class
= GET_MODE_CLASS (outer_submode
);
5551 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
5553 gcc_assert (elem_bitsize
% value_bit
== 0);
5554 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
5556 for (elem
= 0; elem
< num_elem
; elem
++)
5560 /* Vectors are stored in target memory order. (This is probably
5563 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5564 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5566 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5567 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5568 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5569 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5570 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5573 switch (outer_class
)
5576 case MODE_PARTIAL_INT
:
5581 = (GET_MODE_BITSIZE (outer_submode
) + HOST_BITS_PER_WIDE_INT
- 1)
5582 / HOST_BITS_PER_WIDE_INT
;
5583 HOST_WIDE_INT tmp
[MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_WIDE_INT
];
5586 if (GET_MODE_PRECISION (outer_submode
) > MAX_BITSIZE_MODE_ANY_INT
)
5588 for (u
= 0; u
< units
; u
++)
5590 unsigned HOST_WIDE_INT buf
= 0;
5592 i
< HOST_BITS_PER_WIDE_INT
&& base
+ i
< elem_bitsize
;
5594 buf
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5597 base
+= HOST_BITS_PER_WIDE_INT
;
5599 r
= wide_int::from_array (tmp
, units
,
5600 GET_MODE_PRECISION (outer_submode
));
5601 #if TARGET_SUPPORTS_WIDE_INT == 0
5602 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5603 if (wi::min_precision (r
, SIGNED
) > HOST_BITS_PER_DOUBLE_INT
)
5606 elems
[elem
] = immed_wide_int_const (r
, outer_submode
);
5611 case MODE_DECIMAL_FLOAT
:
5614 long tmp
[MAX_BITSIZE_MODE_ANY_MODE
/ 32];
5616 /* real_from_target wants its input in words affected by
5617 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5618 and use WORDS_BIG_ENDIAN instead; see the documentation
5619 of SUBREG in rtl.texi. */
5620 for (i
= 0; i
< max_bitsize
/ 32; i
++)
5622 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5625 if (WORDS_BIG_ENDIAN
)
5626 ibase
= elem_bitsize
- 1 - i
;
5629 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
5632 real_from_target (&r
, tmp
, outer_submode
);
5633 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
5645 f
.mode
= outer_submode
;
5648 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5650 f
.data
.low
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5651 for (; i
< elem_bitsize
; i
+= value_bit
)
5652 f
.data
.high
|= ((unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
5653 << (i
- HOST_BITS_PER_WIDE_INT
));
5655 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
5663 if (VECTOR_MODE_P (outermode
))
5664 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
5669 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5670 Return 0 if no simplifications are possible. */
5672 simplify_subreg (machine_mode outermode
, rtx op
,
5673 machine_mode innermode
, unsigned int byte
)
5675 /* Little bit of sanity checking. */
5676 gcc_assert (innermode
!= VOIDmode
);
5677 gcc_assert (outermode
!= VOIDmode
);
5678 gcc_assert (innermode
!= BLKmode
);
5679 gcc_assert (outermode
!= BLKmode
);
5681 gcc_assert (GET_MODE (op
) == innermode
5682 || GET_MODE (op
) == VOIDmode
);
5684 if ((byte
% GET_MODE_SIZE (outermode
)) != 0)
5687 if (byte
>= GET_MODE_SIZE (innermode
))
5690 if (outermode
== innermode
&& !byte
)
5693 if (CONST_SCALAR_INT_P (op
)
5694 || CONST_DOUBLE_AS_FLOAT_P (op
)
5695 || GET_CODE (op
) == CONST_FIXED
5696 || GET_CODE (op
) == CONST_VECTOR
)
5697 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
5699 /* Changing mode twice with SUBREG => just change it once,
5700 or not at all if changing back op starting mode. */
5701 if (GET_CODE (op
) == SUBREG
)
5703 machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
5704 int final_offset
= byte
+ SUBREG_BYTE (op
);
5707 if (outermode
== innermostmode
5708 && byte
== 0 && SUBREG_BYTE (op
) == 0)
5709 return SUBREG_REG (op
);
5711 /* The SUBREG_BYTE represents offset, as if the value were stored
5712 in memory. Irritating exception is paradoxical subreg, where
5713 we define SUBREG_BYTE to be 0. On big endian machines, this
5714 value should be negative. For a moment, undo this exception. */
5715 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5717 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
5718 if (WORDS_BIG_ENDIAN
)
5719 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5720 if (BYTES_BIG_ENDIAN
)
5721 final_offset
+= difference
% UNITS_PER_WORD
;
5723 if (SUBREG_BYTE (op
) == 0
5724 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
5726 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
5727 if (WORDS_BIG_ENDIAN
)
5728 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5729 if (BYTES_BIG_ENDIAN
)
5730 final_offset
+= difference
% UNITS_PER_WORD
;
5733 /* See whether resulting subreg will be paradoxical. */
5734 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
5736 /* In nonparadoxical subregs we can't handle negative offsets. */
5737 if (final_offset
< 0)
5739 /* Bail out in case resulting subreg would be incorrect. */
5740 if (final_offset
% GET_MODE_SIZE (outermode
)
5741 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
5747 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
5749 /* In paradoxical subreg, see if we are still looking on lower part.
5750 If so, our SUBREG_BYTE will be 0. */
5751 if (WORDS_BIG_ENDIAN
)
5752 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5753 if (BYTES_BIG_ENDIAN
)
5754 offset
+= difference
% UNITS_PER_WORD
;
5755 if (offset
== final_offset
)
5761 /* Recurse for further possible simplifications. */
5762 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
5766 if (validate_subreg (outermode
, innermostmode
,
5767 SUBREG_REG (op
), final_offset
))
5769 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
5770 if (SUBREG_PROMOTED_VAR_P (op
)
5771 && SUBREG_PROMOTED_SIGN (op
) >= 0
5772 && GET_MODE_CLASS (outermode
) == MODE_INT
5773 && IN_RANGE (GET_MODE_SIZE (outermode
),
5774 GET_MODE_SIZE (innermode
),
5775 GET_MODE_SIZE (innermostmode
))
5776 && subreg_lowpart_p (newx
))
5778 SUBREG_PROMOTED_VAR_P (newx
) = 1;
5779 SUBREG_PROMOTED_SET (newx
, SUBREG_PROMOTED_GET (op
));
5786 /* SUBREG of a hard register => just change the register number
5787 and/or mode. If the hard register is not valid in that mode,
5788 suppress this simplification. If the hard register is the stack,
5789 frame, or argument pointer, leave this as a SUBREG. */
5791 if (REG_P (op
) && HARD_REGISTER_P (op
))
5793 unsigned int regno
, final_regno
;
5796 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
5797 if (HARD_REGISTER_NUM_P (final_regno
))
5800 int final_offset
= byte
;
5802 /* Adjust offset for paradoxical subregs. */
5804 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5806 int difference
= (GET_MODE_SIZE (innermode
)
5807 - GET_MODE_SIZE (outermode
));
5808 if (WORDS_BIG_ENDIAN
)
5809 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5810 if (BYTES_BIG_ENDIAN
)
5811 final_offset
+= difference
% UNITS_PER_WORD
;
5814 x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, final_offset
);
	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis,
	     which cannot grok a partial register anyway.  */
5821 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
5822 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
5827 /* If we have a SUBREG of a register that we are replacing and we are
5828 replacing it with a MEM, make a new MEM and try replacing the
5829 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5830 or if we would be widening it. */
5833 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
5834 /* Allow splitting of volatile memory references in case we don't
5835 have instruction to move the whole thing. */
5836 && (! MEM_VOLATILE_P (op
)
5837 || ! have_insn_for (SET
, innermode
))
5838 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
5839 return adjust_address_nv (op
, outermode
, byte
);
5841 /* Handle complex values represented as CONCAT
5842 of real and imaginary part. */
5843 if (GET_CODE (op
) == CONCAT
)
5845 unsigned int part_size
, final_offset
;
5848 part_size
= GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)));
5849 if (byte
< part_size
)
5851 part
= XEXP (op
, 0);
5852 final_offset
= byte
;
5856 part
= XEXP (op
, 1);
5857 final_offset
= byte
- part_size
;
5860 if (final_offset
+ GET_MODE_SIZE (outermode
) > part_size
)
5863 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
5866 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
5867 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
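  /* For example, on a little-endian target the high-part reference
     (subreg:SI (zero_extend:DI (reg:HI x)) 4) covers only bits that the
     zero extension guarantees to be zero, so it folds to (const_int 0).  */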
5880 if (SCALAR_INT_MODE_P (outermode
)
5881 && SCALAR_INT_MODE_P (innermode
)
5882 && GET_MODE_PRECISION (outermode
) < GET_MODE_PRECISION (innermode
)
5883 && byte
== subreg_lowpart_offset (outermode
, innermode
))
5885 rtx tem
= simplify_truncation (outermode
, op
, innermode
);
5893 /* Make a SUBREG operation or equivalent if it folds. */
5896 simplify_gen_subreg (machine_mode outermode
, rtx op
,
5897 machine_mode innermode
, unsigned int byte
)
5901 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
5905 if (GET_CODE (op
) == SUBREG
5906 || GET_CODE (op
) == CONCAT
5907 || GET_MODE (op
) == VOIDmode
)
5910 if (validate_subreg (outermode
, innermode
, op
, byte
))
5911 return gen_rtx_SUBREG (outermode
, op
, byte
);
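/* As an illustration, a caller that wants the low SImode half of a DImode
   value X might write something like

     rtx lo = simplify_gen_subreg (SImode, x, DImode,
				   subreg_lowpart_offset (SImode, DImode));

   which folds to a constant when X is constant, simplifies nested SUBREGs
   and hard-register SUBREGs where possible, and otherwise returns a fresh
   SUBREG, or NULL_RTX if such a SUBREG would not be valid.  */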
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
5957 simplify_rtx (const_rtx x
)
5959 const enum rtx_code code
= GET_CODE (x
);
5960 const machine_mode mode
= GET_MODE (x
);
5962 switch (GET_RTX_CLASS (code
))
5965 return simplify_unary_operation (code
, mode
,
5966 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
5967 case RTX_COMM_ARITH
:
5968 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
5969 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
5971 /* Fall through.... */
5974 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
5977 case RTX_BITFIELD_OPS
:
5978 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
5979 XEXP (x
, 0), XEXP (x
, 1),
5983 case RTX_COMM_COMPARE
:
5984 return simplify_relational_operation (code
, mode
,
5985 ((GET_MODE (XEXP (x
, 0))
5987 ? GET_MODE (XEXP (x
, 0))
5988 : GET_MODE (XEXP (x
, 1))),
5994 return simplify_subreg (mode
, SUBREG_REG (x
),
5995 GET_MODE (SUBREG_REG (x
)),
6002 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6003 if (GET_CODE (XEXP (x
, 0)) == HIGH
6004 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))