/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "double-int.h"
#include "fold-const.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "statistics.h"
#include "fixed-value.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
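/* Editorial example (not part of the original source): the macro yields
   the high word that arithmetic sign extension of LOW would produce,

     HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5)  ==>  (HOST_WIDE_INT) -1
     HWI_SIGN_EXTEND ((HOST_WIDE_INT) 5)   ==>  (HOST_WIDE_INT) 0

   so the (low, high) pair for a small negative value is (-5, -1).  */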
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
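/* Editorial example (not part of the original source): in QImode the most
   negative value has no positive counterpart, so negation wraps,

     neg_const_int (QImode, GEN_INT (-128))  ==>  (const_int -128)

   which is why the negation is performed on an unsigned HOST_WIDE_INT.  */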
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
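/* Editorial example (not part of the original source): for SImode the
   sign bit constant is 1 << 31, so

     mode_signbit_p (SImode, gen_int_mode (0x80000000, SImode))  ==>  true
     mode_signbit_p (SImode, gen_int_mode (0x40000000, SImode))  ==>  false  */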
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
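/* Editorial example contrasting the three predicates (not part of the
   original source), for QImode where the sign bit is 0x80:

     val_signbit_p (QImode, 0x80)               ==>  true
     val_signbit_known_set_p (QImode, 0xff)     ==>  true
     val_signbit_known_clear_p (QImode, 0x7f)   ==>  true  */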
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
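/* Editorial usage sketch (not part of the original source):

     simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))
       ==>  (const_int 5)

   while two unfoldable operands simply come back as a canonically
   ordered (plus ...) rtx.  */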
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
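/* Editorial example (not part of the original source): if X is a MEM whose
   address is a SYMBOL_REF into the constant pool holding the SFmode value
   1.0, avoid_constant_pool_reference returns the (const_double ...) for 1.0
   itself, letting later folding see the actual value.  */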
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
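/* Editorial usage sketch for the generators above (not part of the
   original source):

     simplify_gen_unary (NEG, SImode, GEN_INT (5), SImode)
       ==>  (const_int -5)
     simplify_gen_relational (EQ, SImode, SImode, const0_rtx, const0_rtx)
       ==>  typically (const_int 1), i.e. STORE_FLAG_VALUE in SImode

   each generator first tries to fold, and only builds a fresh rtx when
   no simplification applies.  */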
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
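/* Editorial usage sketch (not part of the original source): replacing a
   register with a constant and simplifying in one step,

     simplify_replace_rtx (gen_rtx_PLUS (SImode, reg, const1_rtx),
                           reg, GEN_INT (41))

   yields (const_int 42) rather than (plus (const_int 41) (const_int 1)).  */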
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (const_int Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
#ifdef WORD_REGISTER_OPERATIONS
      && precision >= BITS_PER_WORD
#endif
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
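/* Editorial example of the distribution case above (not part of the
   original source): on a target where QImode operations are safe,

     (truncate:QI (plus:SI (reg:SI x) (reg:SI y)))

   becomes

     (plus:QI (truncate:QI (reg:SI x)) (truncate:QI (reg:SI y)))

   so the narrower addition can be matched directly.  */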
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
          && GET_MODE_PRECISION (GET_MODE (op))
             < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
             <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (mode)
             >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (mode)
              == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
                                     GET_MODE (SUBREG_REG (op)));
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
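/* Editorial example (not part of the original source): the NOT case above
   folds a double complement without emitting any code,

     simplify_unary_operation (NOT, SImode, gen_rtx_NOT (SImode, x), SImode)
       ==>  x

   and a NULL return means "no simplification found, keep the rtx".  */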
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
                                rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE but a lot of
         upstream callers expect that this function never fails to
         simplify something and so you if you added this to the test
         above the code would die later anyway.  If this assert
         happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
        {
        case NOT:
          result = wi::bit_not (op0);
          break;

        case NEG:
          result = wi::neg (op0);
          break;

        case ABS:
          result = wi::abs (op0);
          break;

        case FFS:
          result = wi::shwi (wi::ffs (op0), mode);
          break;

        case CLZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::clz (op0);
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case CLRSB:
          result = wi::shwi (wi::clrsb (op0), mode);
          break;

        case CTZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::ctz (op0);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case POPCOUNT:
          result = wi::shwi (wi::popcount (op0), mode);
          break;

        case PARITY:
          result = wi::shwi (wi::parity (op0), mode);
          break;

        case BSWAP:
          result = wide_int (op0).bswap ();
          break;

        case TRUNCATE:
        case ZERO_EXTEND:
          result = wide_int::from (op0, width, UNSIGNED);
          break;

        case SIGN_EXTEND:
          result = wide_int::from (op0, width, SIGNED);
          break;

        default:
          return 0;
        }

      return immed_wide_int_const (result, mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          return 0;
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
         things before making this call.  */
      bool fail;

      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          wmax = wi::max_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmax, SIGNED);
          if (REAL_VALUES_LESS (t, x))
            return immed_wide_int_const (wmax, mode);

          /* Test against the signed lower bound.  */
          wmin = wi::min_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmin, SIGNED);
          if (REAL_VALUES_LESS (x, t))
            return immed_wide_int_const (wmin, mode);

          return immed_wide_int_const (real_to_integer (&x, &fail, width),
                                       mode);

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          wmax = wi::max_value (width, UNSIGNED);
          real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
          if (REAL_VALUES_LESS (t, x))
            return immed_wide_int_const (wmax, mode);

          return immed_wide_int_const (real_to_integer (&x, &fail, width),
                                       mode);

        default:
          gcc_unreachable ();
        }
    }

  return 0;
}
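/* Editorial examples of the constant folding above (not part of the
   original source):

     simplify_const_unary_operation (NEG, QImode, GEN_INT (1), QImode)
       ==>  (const_int -1)
     simplify_const_unary_operation (ZERO_EXTEND, SImode,
                                     GEN_INT (-1), QImode)
       ==>  (const_int 255)  */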
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
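/* Editorial example (not part of the original source): for

     (plus (plus x (const_int 2)) (const_int 3))

   the canonicalization "(x op c) op y" -> "(x op y) op c" followed by
   the "(a op b) op c" -> "a op (b op c)" attempt lets the two constants
   meet and fold, reducing the tree to (plus x (const_int 5)).  */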
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
          wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = std::make_pair (XEXP (lhs, 1), mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = std::make_pair (XEXP (rhs, 1), mode);
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                            GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + coeff1, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }
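      /* Worked example (added comment): for (plus (mult x (const_int 3)) x),
         coeff0 is 3 and coeff1 is 1 with lhs == rhs == x, so the code above
         forms (mult x (const_int 4)) and keeps it only if it is no more
         expensive than the original expression.  */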
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == XOR
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and
         STORE_FLAG_VALUE is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
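      /* Worked example of the signbit rule above (added comment, QImode):
         the sign bit constant is 0x80, so
         (plus (xor x (const_int 0x12)) (const_int 0x80)) becomes
         (xor x (const_int 0x92)), because adding the sign bit is the
         same as flipping it.  */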
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
          wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = std::make_pair (XEXP (lhs, 1), mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
            {
              negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                               GET_MODE_PRECISION (mode));
              negcoeff1 = -negcoeff1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
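      /* Worked example (added comment): with x = 0b1101 and y = 0b0110,
         x - (x & y) = 13 - 4 = 9 and x & ~y = 0b1001 = 9 as well; the
         subtraction can never borrow because x & y only has bits that
         are also set in x.  */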
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
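      /* For example (added comment), simplify_plus_minus canonicalizes
         (minus a (plus b c)) as (minus (minus a b) c), exposing further
         folding when b or c is constant.  */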
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
        {
          val = wi::exact_log2 (std::make_pair (trueop1, mode));
          if (val >= 0)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
        }

      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode)
          && trueop1 == CONSTM1_RTX (mode)
          && !side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
          && !side_effects_p (op0))
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
              + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (op1)
          && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
        {
          rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (UINTVAL (XEXP (op0, 1))
                                                       & ~UINTVAL (op1),
                                                       mode));
          return simplify_gen_binary (IOR, mode, tmp, op1);
        }

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && trunc_int_for_mode (mask, mode) == mask
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (mode, XEXP (op0, 0),
                                                       mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == PLUS
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode,
                                                          op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
         constants, simplify to (xor (ior A C) (B&~C)^D), canceling
         out bits inverted twice and not set by C.  Similarly, given
         (xor (and (xor A B) C) D), simplify without inverting C in
         the xor operand: (xor (and A C) (B&C)^D).
        */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
               && GET_CODE (XEXP (op0, 0)) == XOR
               && CONST_INT_P (op1)
               && CONST_INT_P (XEXP (op0, 1))
               && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
        {
          enum rtx_code op = GET_CODE (op0);
          rtx a = XEXP (XEXP (op0, 0), 0);
          rtx b = XEXP (XEXP (op0, 0), 1);
          rtx c = XEXP (op0, 1);
          rtx d = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);
          HOST_WIDE_INT dval = INTVAL (d);
          HOST_WIDE_INT xcval;

          if (op == IOR)
            xcval = ~cval;
          else
            xcval = cval;

          return simplify_gen_binary (XOR, mode,
                                      simplify_gen_binary (op, mode, a, c),
                                      gen_int_mode ((bval & xcval) ^ dval,
                                                    mode));
        }

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          rtx a = XEXP (op0, 0);
          rtx b = XEXP (op0, 1);
          rtx c = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);

          /* Instead of computing ~A&C, we compute its negated value,
             ~(A|~C).  If it yields -1, ~A&C is zero, so we can
             optimize for sure.  If it does not simplify, we still try
             to compute ~A&C below, but since that always allocates
             RTL, we don't try that before committing to returning a
             simplified expression.  */
          rtx n_na_c = simplify_binary_operation (IOR, mode, a,
                                                  GEN_INT (~cval));

          if ((~cval & bval) == 0)
            {
              rtx na_c = NULL_RTX;
              if (n_na_c)
                na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
              else
                {
                  /* If ~A does not simplify, don't bother: we don't
                     want to simplify 2 operations into 3, and if na_c
                     were to simplify with na, n_na_c would have
                     simplified as well.  */
                  rtx na = simplify_unary_operation (NOT, mode, a, mode);
                  if (na)
                    na_c = simplify_gen_binary (AND, mode, na, c);
                }

              /* Try to simplify ~A&C | ~B&C.  */
              if (na_c != NULL_RTX)
                return simplify_gen_binary (IOR, mode, na_c,
                                            gen_int_mode (~bval & cval,
                                                          mode));
            }
          else
            {
              /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
              if (n_na_c == CONSTM1_RTX (mode))
                {
                  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
                                                    gen_int_mode (~cval
                                                                  & bval,
                                                                  mode));
                  return simplify_gen_binary (IOR, mode, a_nc_b,
                                              gen_int_mode (~bval & cval,
                                                            mode));
                }
            }
        }

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & UINTVAL (trueop1)) == 0)
        {
          machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0),
                                                           op1),
                                      gen_int_mode (tmp, mode));
        }

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ~UINTVAL (trueop1)
          && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
                      == UINTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }

      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X))) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 1)) == NOT
          && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 1)) == NOT
          && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        {
          tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
          if (tem)
            return tem;
        }
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            {
              tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (tem)
                return tem;
            }
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (x)
                return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    gen_int_mode (INTVAL (op1) - 1, mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
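      /* Example (added comment): (umod x (const_int 16)) is rewritten
         above as (and x (const_int 15)), since an unsigned modulus by a
         power of two just keeps the low bits.  */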
    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
                       GET_MODE_PRECISION (mode) - 1))
        return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                    mode, op0,
                                    GEN_INT (GET_MODE_PRECISION (mode)
                                             - INTVAL (trueop1)));
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
      /* Given:
         scalar modes M1, M2
         scalar constants c1, c2
         size (M2) > size (M1)
         c1 == size (M2) - size (M1)
         optimize:
         (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
                                 <low_part>)
                      (const_int <c2>))
         to:
         (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
                    <low_part>).  */
      if (code == ASHIFTRT
          && !VECTOR_MODE_P (mode)
          && GET_CODE (op0) == SUBREG
          && CONST_INT_P (op1)
          && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
          && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
          && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
          && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
              > GET_MODE_BITSIZE (mode))
          && (INTVAL (XEXP (SUBREG_REG (op0), 1))
              == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
                  - GET_MODE_BITSIZE (mode)))
          && subreg_lowpart_p (op0))
        {
          rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
                             + INTVAL (op1));
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
          tmp = simplify_gen_binary (ASHIFTRT,
                                     GET_MODE (SUBREG_REG (op0)),
                                     XEXP (SUBREG_REG (op0), 0),
                                     tmp);
          return simplify_gen_subreg (mode, tmp, inner_mode,
                                      subreg_lowpart_offset (mode,
                                                             inner_mode));
        }
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT) width)
        {
          machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_PRECISION (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;
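      /* Worked example (added comment): in SImode a rotate by 24 is
         more than half the precision, so (rotate x (const_int 24)) is
         canonicalized to (rotatert x (const_int 8)) when the target
         provides both rotate instructions.  */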
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract a scalar element from a vector using a chain of
             nested VEC_SELECT expressions.  When the input operand is a
             memory operand, this operation can be simplified to a simple
             scalar load from an offset memory address.  */
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select element, pointed by nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select correct operand of VEC_CONCAT
                     and adjust selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }
          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
        }
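      /* Example (added comment): selecting any element of a duplicate,
         e.g. (vec_select:SF (vec_duplicate:V4SF (reg:SF r))
                             (parallel [(const_int 2)])),
         returns (reg:SF r) directly via the VEC_DUPLICATE check above.  */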
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }

          /* Recognize the identity.  */
          if (GET_MODE (trueop0) == mode)
            {
              bool maybe_ident = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (!CONST_INT_P (j) || INTVAL (j) != i)
                    {
                      maybe_ident = false;
                      break;
                    }
                }
              if (maybe_ident)
                return trueop0;
            }

          /* If we build {a,b} then permute it, build the result directly.  */
          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 0)) == mode
              && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 1)) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 4 && i1 < 4);
              subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
              subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }
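          /* Worked example (added comment): if trueop0 is
             (vec_concat (vec_concat a b) (vec_concat c d)) and the
             selector is (parallel [(const_int 3) (const_int 0)]), then
             i0 = 3 picks d (operand 3/2 = 1, element 3%2 = 1) and
             i1 = 0 picks a, so the result is built directly as
             (vec_concat d a).  */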
          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_MODE (trueop0) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 2 && i1 < 2);
              subop0 = XEXP (trueop0, i0);
              subop1 = XEXP (trueop0, i1);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }

          /* If we select one half of a vec_concat, return that.  */
          if (GET_CODE (trueop0) == VEC_CONCAT
              && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
            {
              rtx subop0 = XEXP (trueop0, 0);
              rtx subop1 = XEXP (trueop0, 1);
              machine_mode mode0 = GET_MODE (subop0);
              machine_mode mode1 = GET_MODE (subop1);
              int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
              int l0 = GET_MODE_SIZE (mode0) / li;
              int l1 = GET_MODE_SIZE (mode1) / li;
              int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
                {
                  bool success = true;
                  for (int i = 1; i < l0; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop0;
                }
              if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
                {
                  bool success = true;
                  for (int i = 1; i < l1; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop1;
                }
            }
        }
      if (XVECLEN (trueop1, 0) == 1
          && CONST_INT_P (XVECEXP (trueop1, 0, 0))
          && GET_CODE (trueop0) == VEC_CONCAT)
        {
          rtx vec = trueop0;
          int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)
            {
              HOST_WIDE_INT vec_size;

              if (CONST_INT_P (XEXP (vec, 0)))
                {
                  /* vec_concat of two const_ints doesn't make sense with
                     respect to modes.  */
                  if (CONST_INT_P (XEXP (vec, 1)))
                    return 0;

                  vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
                             - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
                }
              else
                vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

              if (offset < vec_size)
                vec = XEXP (vec, 0);
              else
                {
                  offset -= vec_size;
                  vec = XEXP (vec, 1);
                }
              vec = avoid_constant_pool_reference (vec);
            }

          if (GET_MODE (vec) == mode)
            return vec;
        }

      /* If we select elements in a vec_merge that all come from the same
         operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
        {
          rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
          if (CONST_INT_P (trueop02))
            {
              unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
              bool all_operand0 = true;
              bool all_operand1 = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (sel & (1 << UINTVAL (j)))
                    all_operand1 = false;
                  else
                    all_operand0 = false;
                }
              if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
                return simplify_gen_binary (VEC_SELECT, mode,
                                            XEXP (op0, 0), op1);
              if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
                return simplify_gen_binary (VEC_SELECT, mode,
                                            XEXP (op0, 1), op1);
            }
        }

      /* If we have two nested selects that are inverses of each
         other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
          && GET_MODE (XEXP (trueop0, 0)) == mode)
        {
          rtx op0_subop1 = XEXP (trueop0, 1);
          gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

          /* Apply the outer ordering vector to the inner one.  (The inner
             ordering vector is expressly permitted to be of a different
             length than the outer one.)  If the result is { 0, 1, ..., n-1 }
             then the two VEC_SELECTs cancel.  */
          for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
            {
              rtx x = XVECEXP (trueop1, 0, i);
              if (!CONST_INT_P (x))
                return 0;
              rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
              if (!CONST_INT_P (y) || i != INTVAL (y))
                return 0;
            }
          return XEXP (trueop0, 0);
        }

      return 0;
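      /* Example (added comment): if the inner select reorders a V4SI
         vector with selector {2, 3, 0, 1} and the outer selector is
         also {2, 3, 0, 1}, their composition is {0, 1, 2, 3}, so the
         loop above detects the identity and the pair of VEC_SELECTs
         cancels.  */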
    case VEC_CONCAT:
      {
        machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                 ? GET_MODE (trueop0)
                                 : GET_MODE_INNER (mode));
        machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                 ? GET_MODE (trueop1)
                                 : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_SCALAR_INT_P (trueop0)
             || CONST_DOUBLE_AS_FLOAT_P (trueop0))
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_SCALAR_INT_P (trueop1)
                || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }

        /* Try to merge two VEC_SELECTs from the same vector into a single
           one.  Restrict the transformation to avoid generating a
           VEC_SELECT with a mode unrelated to its operand.  */
        if (GET_CODE (trueop0) == VEC_SELECT
            && GET_CODE (trueop1) == VEC_SELECT
            && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
            && GET_MODE (XEXP (trueop0, 0)) == mode)
          {
            rtx par0 = XEXP (trueop0, 1);
            rtx par1 = XEXP (trueop1, 1);
            int len0 = XVECLEN (par0, 0);
            int len1 = XVECLEN (par1, 0);
            rtvec vec = rtvec_alloc (len0 + len1);
            for (int i = 0; i < len0; i++)
              RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
            for (int i = 0; i < len1; i++)
              RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
            return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
                                        gen_rtx_PARALLEL (VOIDmode, vec));
          }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
                                 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
          || GET_CODE (op0) == CONST_FIXED
          || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
          || CONST_DOUBLE_AS_FLOAT_P (op1)
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
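  /* Example (added comment): adding the V2SI CONST_VECTORs [1, 2] and
     [10, 20] folds elementwise through simplify_binary_operation into
     the CONST_VECTOR [11, 22]; if any element fails to fold, the whole
     fold is abandoned.  */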
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode)
                   && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return NULL_RTX;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = std::make_pair (op0, mode);
      rtx_mode_t pop1 = std::make_pair (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE but a lot of
         upstream callers expect that this function never fails to
         simplify something, so if you added this to the test
         above the code would die later anyway.  If this assert
         happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
        {
        case MINUS:
          result = wi::sub (pop0, pop1);
          break;

        case PLUS:
          result = wi::add (pop0, pop1);
          break;

        case MULT:
          result = wi::mul (pop0, pop1);
          break;

        case DIV:
          result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case MOD:
          result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case UDIV:
          result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case UMOD:
          result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case AND:
          result = wi::bit_and (pop0, pop1);
          break;

        case IOR:
          result = wi::bit_or (pop0, pop1);
          break;

        case XOR:
          result = wi::bit_xor (pop0, pop1);
          break;

        case SMIN:
          result = wi::smin (pop0, pop1);
          break;

        case SMAX:
          result = wi::smax (pop0, pop1);
          break;

        case UMIN:
          result = wi::umin (pop0, pop1);
          break;

        case UMAX:
          result = wi::umax (pop0, pop1);
          break;

        case LSHIFTRT:
        case ASHIFTRT:
        case ASHIFT:
          {
            wide_int wop1 = pop1;
            if (SHIFT_COUNT_TRUNCATED)
              wop1 = wi::umod_trunc (wop1, width);
            else if (wi::geu_p (wop1, width))
              return NULL_RTX;

            switch (code)
              {
              case LSHIFTRT:
                result = wi::lrshift (pop0, wop1);
                break;

              case ASHIFTRT:
                result = wi::arshift (pop0, wop1);
                break;

              case ASHIFT:
                result = wi::lshift (pop0, wop1);
                break;

              default:
                gcc_unreachable ();
              }
            break;
          }
        case ROTATE:
        case ROTATERT:
          {
            if (wi::neg_p (pop1))
              return NULL_RTX;

            switch (code)
              {
              case ROTATE:
                result = wi::lrotate (pop0, pop1);
                break;

              case ROTATERT:
                result = wi::rrotate (pop0, pop1);
                break;

              default:
                gcc_unreachable ();
              }
            break;
          }
        default:
          return NULL_RTX;
        }
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
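/* Worked example (added comment): on a target where
   SHIFT_COUNT_TRUNCATED is nonzero, folding
   (ashift (const_int 1) (const_int 33)) in SImode reduces the count
   mod 32 and yields (const_int 2); without truncation an out-of-range
   count makes the fold return NULL_RTX instead.  */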
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return false;
}
static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      canonicalized |= this_neg || i != n_ops - 2;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int j = i - 1;

	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }

  /* If nothing changed, fail.  */
  if (!canonicalized)
    return NULL_RTX;

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
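/* Illustrative example (not from the original sources): given
   (plus (minus (reg A) (reg B)) (plus (reg B) (const_int 4))), the
   expansion loop above flattens the tree into the operand list
       A(+), B(-), B(+), 4(+),
   the combination loop cancels B(-) against B(+), and the rebuild
   step emits (plus (reg A) (const_int 4)).  */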
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
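/* Illustrative example (not from the original sources): the COMPARE
   extraction above rewrites
   (eq:SI (compare:CC (reg X) (reg Y)) (const_int 0)) into
   (eq:SI (reg X) (reg Y)), so the comparison can be simplified in the
   mode of X and Y rather than in the condition-code mode.  */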
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE is the mode in which
   the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
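/* Illustrative example (not from the original sources): by the
   (eq/ne (plus x cst1) cst2) rule above,
   (eq:SI (plus:SI (reg X) (const_int 3)) (const_int 7)) becomes
   (eq:SI (reg X) (const_int 4)).  */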
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
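/* Illustrative example (not from the original sources): with
   known_results == CMP_EQ the mapping above folds EQ, GE, LE, GEU and
   LEU to const_true_rtx, and NE, LT, GT, LTU and GTU to const0_rtx.  */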
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
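/* Illustrative example (not from the original sources): for
   trueop0 == (const_int -1) and trueop1 == (const_int 1), the code above
   computes cr = CMP_LT | CMP_GTU, since -1 < 1 as a signed value while
   its unsigned image 0xff..ff exceeds 1; so (lt -1 1) folds to
   const_true_rtx while (ltu -1 1) folds to const0_rtx.  */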
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
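/* Illustrative example (not from the original sources): for a QImode
   value known to be sign-extended from 4 bits (num_sign_bit_copies == 5),
   mmin and mmax shrink from [-128, 127] to [-8, 7], so (gt x (const_int 7))
   folds to const0_rtx and (ge x (const_int -8)) folds to const_true_rtx.  */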
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;
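      /* Illustrative example (not from the original sources): on a
	 !BITS_BIG_ENDIAN target,
	 (sign_extract:SI (const_int 0xb4) (const_int 4) (const_int 4))
	 shifts right by 4 to get 0xb, masks to 4 bits, sees the top bit
	 set and sign-extends, folding to (const_int -5).  */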
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a
	     (1 << i)) with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
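/* Illustrative example (not from the original sources): for a V4SI
   vec_merge of two CONST_VECTORs with sel = 0b0101, elements 0 and 2
   are taken from op0 and elements 1 and 3 from op1; with sel = 0 the
   whole result is simply op1.  */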
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = std::make_pair (el, innermode);
	    unsigned char extend = wi::sign_mask (val);

	    for (i = 0; i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
5579 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5580 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5581 will already have offset 0. */
5582 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
5584 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
5586 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5587 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5588 byte
= (subword_byte
% UNITS_PER_WORD
5589 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5592 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5593 so if it's become negative it will instead be very large.) */
5594 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5596 /* Convert from bytes to chunks of size value_bit. */
5597 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
5599 /* Re-pack the value. */
5601 if (VECTOR_MODE_P (outermode
))
5603 num_elem
= GET_MODE_NUNITS (outermode
);
5604 result_v
= rtvec_alloc (num_elem
);
5605 elems
= &RTVEC_ELT (result_v
, 0);
5606 outer_submode
= GET_MODE_INNER (outermode
);
5612 outer_submode
= outermode
;
5615 outer_class
= GET_MODE_CLASS (outer_submode
);
5616 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
5618 gcc_assert (elem_bitsize
% value_bit
== 0);
5619 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
	      / HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
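/* A minimal, self-contained sketch (not part of GCC) of the unpack/select
   idea above for the little-endian scalar case; the helper name and the
   fixed 4-byte buffer are illustrative assumptions.  For instance,
   example_subreg_byte (0x1234, 0) yields 0x34, mirroring how
   (subreg:QI (const_int 0x1234) 0) folds to (const_int 0x34).  */
#if 0
#include <stdint.h>

static uint8_t
example_subreg_byte (uint32_t inner, unsigned int byte)
{
  /* Unpack the inner value into a little-endian array of 8-bit chunks,
     then select the requested byte, much as simplify_immed_subreg does
     with its 'value' array.  */
  uint8_t value[4];
  for (unsigned int i = 0; i < 4; i++)
    value[i] = (inner >> (8 * i)) & 0xff;
  return value[byte];
}
#endif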
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
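  /* Illustrative example (not from the original sources): the offset math
     above folds a doubled subreg such as
     (subreg:QI (subreg:HI (reg:SI R) 0) 0) directly into
     (subreg:QI (reg:SI R) 0), changing mode only once.  */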
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
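  /* Illustrative example (not from the original sources): for a DCmode
     value (concat:DC (reg:DF R) (reg:DF I)) with 8-byte parts,
     (subreg:DF ... 0) selects the real part R and (subreg:DF ... 8)
     selects the imaginary part I.  */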
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))