/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "double-int.h"
#include "tree.h"
#include "fold-const.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "statistics.h"
#include "fixed-value.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
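/* E.g. if LOW has its top bit set (so it is negative when reinterpreted
   as a signed HOST_WIDE_INT), HWI_SIGN_EXTEND (low) yields
   (HOST_WIDE_INT) -1 for the high half; otherwise it yields 0.  */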
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
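/* E.g. in QImode the most negative value is -128; negating it would
   mathematically give 128, which QImode cannot represent, so
   gen_int_mode truncates the result back to -128.  */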
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
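/* E.g. mode_signbit_p (SImode, x) holds exactly when the value of X,
   masked to 32 bits, is 1 << 31 -- only the SImode sign bit set.  */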
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
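/* For instance, simplify_gen_binary (PLUS, SImode, const1_rtx, reg)
   first tries constant folding and, failing that, returns
   (plus:SI reg (const_int 1)) with the constant canonically second.  */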
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
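/* For instance, a (mem:SF (symbol_ref ...)) that addresses a constant
   pool entry holding 1.0 is replaced here by the CONST_DOUBLE for 1.0,
   which the caller can then fold further.  */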
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
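/* E.g. replacing (reg:SI 100) with (const_int 8) in
   (plus:SI (reg:SI 100) (const_int 4)) yields (const_int 12), since
   the substituted expression is re-simplified on the way back up.  */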
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
#ifdef WORD_REGISTER_OPERATIONS
      && precision >= BITS_PER_WORD
#endif
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
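/* For instance, on a 32-bit-word target simplify_truncation can rewrite
   (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32))) via the
   word-extraction case above into a subreg selecting the high word
   of X, eliminating the shift entirely.  */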
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
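      /* E.g. (not (and X Y)) becomes (ior (not X) (not Y)) and
	 (not (ior X Y)) becomes (and (not X) (not Y)) here.  */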
      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
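      /* E.g. with STORE_FLAG_VALUE == 1, the LT case above turns
	 (neg:SI (lt:SI (reg:SI X) (const_int 0))) into
	 (ashiftrt:SI (reg:SI X) (const_int 31)), which replicates the
	 sign bit of X across the whole word.  */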
    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
							    0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x).

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}
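      /* E.g. (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
				       (sign_extend:SI (reg:HI B))))
	 becomes (mult:DI (sign_extend:DI (reg:HI A))
			  (sign_extend:DI (reg:HI B))); a 16x16->32 bit
	 product cannot overflow 32 bits, so the wider widening multiply
	 computes the same value.  */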
      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_PRECISION (mode)
		      > GET_MODE_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
	  && GET_MODE_PRECISION (GET_MODE (op))
	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	     <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (mode)
	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (mode)
	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
				     GET_MODE (SUBREG_REG (op)));
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
				rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though this is
	     a dangerous assumption, as CONST_INTs are often created
	     and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though this is
	     a dangerous assumption, as CONST_INTs are often created
	     and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
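  /* E.g. (float:SF (const_int 3)) folds to the CONST_DOUBLE 3.0 here;
     UNSIGNED_FLOAT differs only in reading the bits of OP as an
     unsigned rather than a signed integer.  */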
  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if this were added to the test above,
	 the code would die later anyway.  If this assert happens, you
	 just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
	    int_value = GET_MODE_PRECISION (mode);
	  result = wi::shwi (int_value, mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
	    int_value = GET_MODE_PRECISION (mode);
	  result = wi::shwi (int_value, mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	default:
	  return 0;
	}

      return immed_wide_int_const (result, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d;

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (REAL_VALUES_LESS (t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (REAL_VALUES_LESS (x, t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (&x, &fail, width),
				       mode);
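	  /* E.g. (fix:SI (const_double:DF 1.0e10)) saturates to
	     (const_int 2147483647) via the bound check above, and FIX
	     of a NaN folds to (const_int 0).  */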
	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (REAL_VALUES_LESS (t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (&x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }
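  /* E.g. (and:SI (bswap:SI x) (const_int 0xff)) becomes
     (bswap:SI (and:SI x (const_int 0xff000000))); the constant is
     byte-swapped at compile time and the BSWAP is pulled outward,
     where it can cancel against another byte swap.  */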
  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
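      /* E.g. this reassociates "(x + 1) + 2" into "x + 3": the two
	 constants meet and fold once the expression is rebalanced.  */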
      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
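
      /* This is the two's complement identity -a == ~a + 1; e.g. for
	 a == 5 in QImode, ~5 is -6 and -6 + 1 is -5.  */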
      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = std::make_pair (XEXP (rhs, 1), mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}
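
      /* For example, (plus (ashift x 2) x) is seen with coeff0 == 4
	 and coeff1 == 1, so it becomes (mult x 5); the result is kept
	 only when it is no more expensive than the original form.  */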
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
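
      /* The bits of (x & y) are a subset of the bits of x, so the
	 subtraction borrows nothing and just clears those bits; e.g.
	 x == 0b1101, y == 0b1011 gives x - (x & y) == 0b0100 == x & ~y.  */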
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (std::make_pair (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
	}
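
      /* E.g. a multiply by 8 becomes (ashift op0 3); wi::exact_log2
	 returns -1 for anything that is not a power of two, so other
	 constants fall through unchanged.  */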
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
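
      /* E.g. in SImode, (ior (ashift a (const_int 8))
	 (lshiftrt a (const_int 24))) satisfies 8 + 24 == 32 and so
	 becomes (rotate a (const_int 8)).  */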
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
	      + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	{
	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (UINTVAL (XEXP (op0, 1))
						       & ~UINTVAL (op1),
						       mode));
	  return simplify_gen_binary (IOR, mode, tmp, op1);
	}

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
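
      /* When a & b == 0 every result bit comes from exactly one operand,
	 so a ^ b and a | b agree; e.g. 0xf0 ^ 0x0f == 0xf0 | 0x0f == 0xff.  */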
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
	 out bits inverted twice and not set by C.  Similarly, given
	 (xor (and (xor A B) C) D), simplify without inverting C in
	 the xor operand: (xor (and A C) (B&C)^D).
	 */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (op1)
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
	{
	  enum rtx_code op = GET_CODE (op0);
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx d = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);
	  HOST_WIDE_INT dval = INTVAL (d);
	  HOST_WIDE_INT xcval;

	  if (op == IOR)
	    xcval = ~cval;
	  else
	    xcval = cval;

	  return simplify_gen_binary (XOR, mode,
				      simplify_gen_binary (op, mode, a, c),
				      gen_int_mode ((bval & xcval) ^ dval,
						    mode));
	}

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  /* Instead of computing ~A&C, we compute its negated value,
	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
						  GEN_INT (~cval));

	  if ((~cval & bval) == 0)
	    {
	      rtx na_c = NULL_RTX;
	      if (n_na_c)
		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
	      else
		{
		  /* If ~A does not simplify, don't bother: we don't
		     want to simplify 2 operations into 3, and if na_c
		     were to simplify with na, n_na_c would have
		     simplified as well.  */
		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
		  if (na)
		    na_c = simplify_gen_binary (AND, mode, na, c);
		}

	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval,
							  mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (n_na_c == CONSTM1_RTX (mode))
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}
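
      /* A concrete check of the first branch: with B == 0x3 and
	 C == 0x7 (so ~C & B == 0), (A & 0x3) ^ 0x7 equals
	 (~A & 0x7) | (~0x3 & 0x7) for every A; e.g. A == 2 gives
	 5 on both sides.  */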
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}

      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 1)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 1)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (tem)
		return tem;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (x)
		return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
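
      /* E.g. an unsigned x % 8 becomes (and x 7): for a power-of-two
	 divisor the remainder is exactly the low log2(divisor) bits.  */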
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;

    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_PRECISION (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				    mode, op0,
				    GEN_INT (GET_MODE_PRECISION (mode)
					     - INTVAL (trueop1)));
#endif
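
      /* E.g. a rotate left by 24 in SImode is rewritten above as
	 (rotatert ... 8), keeping the canonical rotate amount within
	 1 .. precision/2.  */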
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

      /* Given:
	 scalar modes M1, M2
	 scalar constants c1, c2
	 size (M2) > size (M1)
	 c1 == size (M2) - size (M1)
	 optimize:
	 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
				 <low_part>)
		      (const_int <c2>))
	 to:
	 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
		    <low_part>).  */
      if (code == ASHIFTRT
	  && !VECTOR_MODE_P (mode)
	  && SUBREG_P (op0)
	  && CONST_INT_P (op1)
	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
	  && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
	      > GET_MODE_BITSIZE (mode))
	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
	      == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
		  - GET_MODE_BITSIZE (mode)))
	  && subreg_lowpart_p (op0))
	{
	  rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
			     + INTVAL (op1));
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	  tmp = simplify_gen_binary (ASHIFTRT,
				     GET_MODE (SUBREG_REG (op0)),
				     XEXP (SUBREG_REG (op0), 0),
				     tmp);
	  return simplify_gen_subreg (mode, tmp, inner_mode,
				      subreg_lowpart_offset (mode,
							     inner_mode));
	}

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
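
      /* The CLZ rewrite above works because for nonzero X the CLZ value
	 is at most precision - 1, so shifting it right by log2(precision)
	 yields 0; only X == 0, whose CLZ is defined as the precision,
	 yields 1 == STORE_FLAG_VALUE.  */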
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));
	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract scalar element from a vector using chain of
	     nested VEC_SELECT expressions.  When input operand is a memory
	     operand, this operation can be simplified to a simple scalar
	     load from an offseted memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select correct operand of VEC_CONCAT
		     and adjust selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }

	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Recognize the identity.  */
	  if (GET_MODE (trueop0) == mode)
	    {
	      bool maybe_ident = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (j) || INTVAL (j) != i)
		    {
		      maybe_ident = false;
		      break;
		    }
		}
	      if (maybe_ident)
		return trueop0;
	    }
	  /* If we build {a,b} then permute it, build the result directly.  */
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 0)) == mode
	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 1)) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 4 && i1 < 4);
	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_MODE (trueop0) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 2 && i1 < 2);
	      subop0 = XEXP (trueop0, i0);
	      subop1 = XEXP (trueop0, i1);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }
	  /* If we select one half of a vec_concat, return that.  */
	  if (GET_CODE (trueop0) == VEC_CONCAT
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
	    {
	      rtx subop0 = XEXP (trueop0, 0);
	      rtx subop1 = XEXP (trueop0, 1);
	      machine_mode mode0 = GET_MODE (subop0);
	      machine_mode mode1 = GET_MODE (subop1);
	      int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
	      int l0 = GET_MODE_SIZE (mode0) / li;
	      int l1 = GET_MODE_SIZE (mode1) / li;
	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
		{
		  bool success = true;
		  for (int i = 1; i < l0; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop0;
		}
	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
		{
		  bool success = true;
		  for (int i = 1; i < l1; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop1;
		}
	    }
	}
      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size;

	      if (CONST_INT_P (XEXP (vec, 0)))
		{
		  /* vec_concat of two const_ints doesn't make sense with
		     respect to modes.  */
		  if (CONST_INT_P (XEXP (vec, 1)))
		    return 0;

		  vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
			     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
		}
	      else
		vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}
      /* If we select elements in a vec_merge that all come from the same
	 operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
	{
	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
	  if (CONST_INT_P (trueop02))
	    {
	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
	      bool all_operand0 = true;
	      bool all_operand1 = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (sel & (1 << UINTVAL (j)))
		    all_operand1 = false;
		  else
		    all_operand0 = false;
		}
	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
		return simplify_gen_binary (VEC_SELECT, mode,
					    XEXP (op0, 0), op1);
	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
		return simplify_gen_binary (VEC_SELECT, mode,
					    XEXP (op0, 1), op1);
	    }
	}
      /* If we have two nested selects that are inverses of each
	 other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
	  && GET_MODE (XEXP (trueop0, 0)) == mode)
	{
	  rtx op0_subop1 = XEXP (trueop0, 1);
	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

	  /* Apply the outer ordering vector to the inner one.  (The inner
	     ordering vector is expressly permitted to be of a different
	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
	     then the two VEC_SELECTs cancel.  */
	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
	    {
	      rtx x = XVECEXP (trueop1, 0, i);
	      if (!CONST_INT_P (x))
		return 0;
	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
	      if (!CONST_INT_P (y) || i != INTVAL (y))
		return 0;
	    }
	  return XEXP (trueop0, 0);
	}

      return 0;
    case VEC_CONCAT:
      {
	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				 ? GET_MODE (trueop0)
				 : GET_MODE_INNER (mode));
	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				 ? GET_MODE (trueop1)
				 : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_SCALAR_INT_P (trueop0)
	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_SCALAR_INT_P (trueop1)
		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
	/* Try to merge two VEC_SELECTs from the same vector into a single one.
	   Restrict the transformation to avoid generating a VEC_SELECT with a
	   mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }

	return 0;
      }
    default:
      gcc_unreachable ();
    }

  return 0;
}

rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = std::make_pair (op0, mode);
      rtx_mode_t pop1 = std::make_pair (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something and so if you added this to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, width);
	    else if (wi::geu_p (wop1, width))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
4093 struct simplify_plus_minus_op_data
4100 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4104 result
= (commutative_operand_precedence (y
)
4105 - commutative_operand_precedence (x
));
4109 /* Group together equal REGs to do more simplification. */
4110 if (REG_P (x
) && REG_P (y
))
4111 return REGNO (x
) > REGNO (y
);
static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      canonicalized |= this_neg || i != n_ops - 2;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;

	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!changed)
	{
	  if (!canonicalized)
	    return NULL_RTX;
	  break;
	}

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
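
/* Illustrative sketch, not part of GCC; the type and names below are
   hypothetical.  The expansion loop in simplify_plus_minus flattens a
   nested PLUS/MINUS/NEG tree into an array of (operand, negate) pairs,
   flipping the flag under the right arm of a MINUS, so that a - (b - c)
   yields {a,+} {b,-} {c,+} before the pairs are re-simplified and the
   expression is rebuilt.  A stand-alone model of the flattening step:  */

struct model_node
{
  char kind;			/* '+', '-', or 'v' for a leaf value.  */
  struct model_node *l, *r;
  int val;
};

static int
model_flatten (struct model_node *e, int neg, int *vals, int *negs, int n)
{
  if (e->kind == 'v')
    {
      vals[n] = e->val;
      negs[n] = neg;
      return n + 1;
    }
  n = model_flatten (e->l, neg, vals, negs, n);
  /* The right operand of MINUS is negated relative to the context.  */
  return model_flatten (e->r, (e->kind == '-') ? !neg : neg, vals, negs, n);
}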
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
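
/* Illustrative sketch, not part of GCC; the names are hypothetical.
   Several of the canonicalizations above rely on simple unsigned range
   facts: no unsigned value is below zero, so (GTU x 0) is exactly
   (NE x 0) and (LTU x 1) is exactly (EQ x 0).  Spelled out on plain
   unsigned ints:  */

static int
model_gtu_0 (unsigned int x)
{
  return x > 0u;		/* Identical to x != 0.  */
}

static int
model_ltu_1 (unsigned int x)
{
  return x < 1u;		/* Identical to x == 0.  */
}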
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
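
/* Illustrative sketch, not part of GCC; names are hypothetical.  The
   CMP_* flags encode everything a constant comparison can reveal:
   equality, the signed order (CMP_LT/CMP_GT), and the unsigned order
   (CMP_LTU/CMP_GTU), which may disagree.  Comparing -1 and 1, the
   signed order is "less", but reinterpreted as unsigned, -1 is the
   maximum value, so the unsigned order is "greater":  */

static int
model_known_results (int a, int b)
{
  enum { M_EQ = 1, M_LT = 2, M_GT = 4, M_LTU = 8, M_GTU = 16 };

  if (a == b)
    return M_EQ;
  return (a < b ? M_LT : M_GT)
	 | ((unsigned int) a < (unsigned int) b ? M_LTU : M_GTU);
}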
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */
  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (mmin < 0)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
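
/* Illustrative sketch, not part of GCC; names are hypothetical.  The
   bounds optimization above folds a comparison against a constant VAL
   once VAL falls outside the range [MMIN, MMAX] that nonzero_bits and
   num_sign_bit_copies prove for the other operand.  The signed GE case,
   in isolation:  */

static int
model_fold_ge (long mmin, long mmax, long val, int *result)
{
  if (val <= mmin)
    {
      *result = 1;		/* x >= val is a tautology.  */
      return 1;
    }
  if (val > mmax)
    {
      *result = 0;		/* x >= val is a contradiction.  */
      return 1;
    }
  return 0;			/* Cannot fold.  */
}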
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	     with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
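
/* Illustrative sketch, not part of GCC; names are hypothetical.  The
   VEC_MERGE folding above treats operand 2 as a bitmask choosing, per
   element, between the two input vectors; bit i set selects element i
   of the first operand.  Modeled on small arrays:  */

static void
model_vec_merge (const int *a, const int *b, unsigned long sel,
		 int *out, unsigned int n_elts)
{
  unsigned int i;

  for (i = 0; i < n_elts; i++)
    out[i] = (sel & (1ul << i)) ? a[i] : b[i];
}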
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = std::make_pair (el, innermode);
	    unsigned char extend = wi::sign_mask (val);

	    for (i = 0; i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
		/ HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
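
/* Illustrative sketch, not part of GCC; the names and types below are
   hypothetical.  Ignoring the word/byte-endianness corrections,
   simplify_immed_subreg boils down to serializing the inner constant
   into little-endian bytes and re-reading the outer value at the
   subreg's byte offset.  A stand-alone model, where the caller must
   keep byte_offset + sizeof (unsigned int) within the inner value:  */

static unsigned int
model_immed_subreg (unsigned long long inner, unsigned int byte_offset)
{
  unsigned char buf[sizeof (inner)];
  unsigned int out = 0;
  unsigned int i;

  /* Unpack the inner constant, least-significant byte first.  */
  for (i = 0; i < sizeof buf; i++)
    buf[i] = (unsigned char) (inner >> (8 * i));

  /* Re-pack the outer value starting at the requested byte.  */
  for (i = 0; i < sizeof out; i++)
    out |= (unsigned int) buf[byte_offset + i] << (8 * i);
  return out;
}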
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
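
/* Illustrative sketch, not part of GCC; names are hypothetical.  For the
   common nonparadoxical little-endian case, the subreg-of-subreg rule
   above simply adds the two byte offsets and then revalidates the
   combined offset against the innermost mode, exactly as modeled here:  */

static int
model_compose_subreg (unsigned int inner_size, unsigned int outer_size,
		      unsigned int byte1, unsigned int byte2,
		      unsigned int *final_byte)
{
  unsigned int final_offset = byte1 + byte2;

  if (final_offset % outer_size != 0
      || final_offset + outer_size > inner_size)
    return 0;			/* Would not be a valid subreg.  */
  *final_byte = final_offset;
  return 1;
}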
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}