/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "double-int.h"
#include "fold-const.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "statistics.h"
#include "fixed-value.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
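/* Illustrative example (added for exposition, not from the original
   sources): the macro computes the high half that sign extension of
   LOW would produce.  Assuming a 64-bit HOST_WIDE_INT,
   HWI_SIGN_EXTEND (0x8000000000000000) is -1 (all ones) and
   HWI_SIGN_EXTEND (1) is 0, matching an arithmetic right shift of
   LOW by HOST_BITS_PER_WIDE_INT - 1.  */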
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
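/* Illustrative example (added for exposition, not from the original
   sources): in 32-bit SImode the only accepted value is 0x80000000,
   so a constant such as (const_int -2147483648 [0x80000000]) satisfies
   mode_signbit_p (SImode, x), while (const_int 0x40000000) does not.  */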
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
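/* Illustrative example (added for exposition, not from the original
   sources): with two constant operands the call folds immediately,
   e.g. simplify_gen_binary (PLUS, SImode, const1_rtx, const1_rtx)
   yields (const_int 2); for (plus (const_int 1) (reg:SI 100)) the
   commutative canonicalization instead produces
   (plus:SI (reg:SI 100) (const_int 1)).  */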
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
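/* Illustrative example (added for exposition, not from the original
   sources): if X is (mem (plus (symbol_ref Y) (const_int 12))) with a
   known offset of 4 and DECL_RTL gives
   (mem (plus (symbol_ref Y) (const_int 8))), then 8 + 4 == 12, the
   bases compare equal, and the old address is reused rather than a
   new MEM being built.  */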
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
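/* Illustrative example (added for exposition, not from the original
   sources): replacing (reg:SI 100) with (const_int 0) in
   (plus:SI (reg:SI 100) (reg:SI 101)) via simplify_replace_rtx does
   not just substitute; the rebuilt addition simplifies to
   (reg:SI 101) because x + 0 folds to x.  */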
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
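/* Illustrative example (added for exposition, not from the original
   sources): on such a target,
   (truncate:SI (and:DI (reg:DI X) (const_int 63))) may be rewritten
   as (and:SI (truncate:SI (reg:DI X)) (const_int 63)), where each
   operand is truncated with simplify_gen_unary (TRUNCATE, ...) so
   that the subreg-vs-TRUNCATE decision is made per operand.  */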
static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
#ifdef WORD_REGISTER_OPERATIONS
      && precision >= BITS_PER_WORD
#endif
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
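/* Illustrative example of the word-extraction rule above (added for
   exposition, not from the original sources): on a hypothetical
   little-endian target with 16-bit words,
   (truncate:HI (lshiftrt:DI (reg:DI X) (const_int 16))) becomes
   (subreg:HI (reg:DI X) 2); the shift count selects which word-sized
   piece of the multi-word value is kept.  */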
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
853 simplify_unary_operation_1 (enum rtx_code code
, machine_mode mode
, rtx op
)
855 enum rtx_code reversed
;
861 /* (not (not X)) == X. */
862 if (GET_CODE (op
) == NOT
)
865 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
866 comparison is all ones. */
867 if (COMPARISON_P (op
)
868 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
869 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
870 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
871 XEXP (op
, 0), XEXP (op
, 1));
873 /* (not (plus X -1)) can become (neg X). */
874 if (GET_CODE (op
) == PLUS
875 && XEXP (op
, 1) == constm1_rtx
)
876 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
878 /* Similarly, (not (neg X)) is (plus X -1). */
879 if (GET_CODE (op
) == NEG
)
880 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
883 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
884 if (GET_CODE (op
) == XOR
885 && CONST_INT_P (XEXP (op
, 1))
886 && (temp
= simplify_unary_operation (NOT
, mode
,
887 XEXP (op
, 1), mode
)) != 0)
888 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
890 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
891 if (GET_CODE (op
) == PLUS
892 && CONST_INT_P (XEXP (op
, 1))
893 && mode_signbit_p (mode
, XEXP (op
, 1))
894 && (temp
= simplify_unary_operation (NOT
, mode
,
895 XEXP (op
, 1), mode
)) != 0)
896 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
899 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
900 operands other than 1, but that is not valid. We could do a
901 similar simplification for (not (lshiftrt C X)) where C is
902 just the sign bit, but this doesn't seem common enough to
904 if (GET_CODE (op
) == ASHIFT
905 && XEXP (op
, 0) == const1_rtx
)
907 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
908 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
911 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
912 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
913 so we can perform the above simplification. */
914 if (STORE_FLAG_VALUE
== -1
915 && GET_CODE (op
) == ASHIFTRT
916 && CONST_INT_P (XEXP (op
, 1))
917 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
918 return simplify_gen_relational (GE
, mode
, VOIDmode
,
919 XEXP (op
, 0), const0_rtx
);
922 if (GET_CODE (op
) == SUBREG
923 && subreg_lowpart_p (op
)
924 && (GET_MODE_SIZE (GET_MODE (op
))
925 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
926 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
927 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
929 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
932 x
= gen_rtx_ROTATE (inner_mode
,
933 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
935 XEXP (SUBREG_REG (op
), 1));
936 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         canonicalized.  */
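      /* Illustrative example (added for exposition, not from the
         original sources): (not (and X (not Y))) becomes
         (ior (not X) Y), which matches a single or-not pattern on
         machines that have one.  */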
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                                  0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
         */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);
      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);
      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
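      /* Illustrative example (added for exposition, not from the
         original sources):
         (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
                                  (sign_extend:SI y:HI)))
         becomes (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI));
         the HImode operands contribute at most 16 + 16 = 32
         significant bits, so the SImode product cannot overflow and
         the wider multiply is equivalent.  */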
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }
      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
          && GET_MODE_PRECISION (GET_MODE (op))
             < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
             <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (mode)
             >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (mode)
              == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
                                     GET_MODE (SUBREG_REG (op)));
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
                                rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INTs have VOIDmode as their mode.  We assume that all
             the bits of the constant are significant, though this is a
             dangerous assumption, as CONST_INTs are often created and
             used with garbage in the bits outside of the precision of
             the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INTs have VOIDmode as their mode.  We assume that all
             the bits of the constant are significant, though this is a
             dangerous assumption, as CONST_INTs are often created and
             used with garbage in the bits outside of the precision of
             the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE, but a lot of
         upstream callers expect that this function never fails to
         simplify something, so if you added this to the test above
         the code would die later anyway.  If this assert happens, you
         just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
        {
        case NOT:
          result = wi::bit_not (op0);
          break;

        case NEG:
          result = wi::neg (op0);
          break;

        case ABS:
          result = wi::abs (op0);
          break;

        case FFS:
          result = wi::shwi (wi::ffs (op0), mode);
          break;

        case CLZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::clz (op0);
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case CLRSB:
          result = wi::shwi (wi::clrsb (op0), mode);
          break;

        case CTZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::ctz (op0);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case POPCOUNT:
          result = wi::shwi (wi::popcount (op0), mode);
          break;

        case PARITY:
          result = wi::shwi (wi::parity (op0), mode);
          break;

        case BSWAP:
          result = wide_int (op0).bswap ();
          break;

        case TRUNCATE:
        case ZERO_EXTEND:
          result = wide_int::from (op0, width, UNSIGNED);
          break;

        case SIGN_EXTEND:
          result = wide_int::from (op0, width, SIGNED);
          break;

        case SQRT:
        default:
          return 0;
        }

      return immed_wide_int_const (result, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d;

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          return 0;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      wide_int wmax, wmin;
      /* This is part of the ABI of real_to_integer, but we check
         things before making this call.  */
      bool fail;

      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          wmax = wi::max_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmax, SIGNED);
          if (REAL_VALUES_LESS (t, x))
            return immed_wide_int_const (wmax, mode);

          /* Test against the signed lower bound.  */
          wmin = wi::min_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmin, SIGNED);
          if (REAL_VALUES_LESS (x, t))
            return immed_wide_int_const (wmin, mode);

          return immed_wide_int_const (real_to_integer (&x, &fail, width),
                                       mode);

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          wmax = wi::max_value (width, UNSIGNED);
          real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
          if (REAL_VALUES_LESS (t, x))
            return immed_wide_int_const (wmax, mode);

          return immed_wide_int_const (real_to_integer (&x, &fail, width),
                                       mode);

        default:
          gcc_unreachable ();
        }
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */
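/* Illustrative example (added for exposition, not from the original
   sources): in SImode, (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))); the constant mask is byte
   swapped so the AND can be applied before the BSWAP.  */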
static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */
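/* Illustrative example (added for exposition, not from the original
   sources): given (plus (plus (reg) (const_int 1)) (const_int 2)),
   reassociation lets the two constants fold, yielding
   (plus (reg) (const_int 3)).  */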
static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = std::make_pair (XEXP (rhs, 1), mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}
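      /* Worked example (illustrative only): for (plus (ashift x 2) x)
	 both sides reduce to the common operand X with coefficients 4
	 and 1, so COEFF0 + COEFF1 == 5 and the candidate replacement is
	 (mult x (const_int 5)).  It is kept only if set_src_cost says
	 the multiply is no more expensive than the shift-and-add.  */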
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));
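      /* Why the sign bit is special (illustrative): in 8-bit arithmetic
	 the sign bit is 0x80, and for any x, x + 0x80 and x ^ 0x80 agree
	 because the carry out of bit 7 is discarded; e.g.
	 0x90 + 0x80 == 0x10 == 0x90 ^ 0x80 (mod 256).  That is what lets
	 the two constants C1 and C2 be combined with XOR here.  */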
      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
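      /* Bitwise check (illustrative): each bit of (x & y) is set only
	 where the corresponding bit of x is set, so the subtraction
	 never borrows and works bit by bit: with x = 0b1101, y = 0b1011,
	 x - (x & y) = 0b1101 - 0b1001 = 0b0100 = x & ~y.  */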
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (std::make_pair (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
	}
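      /* Worked example (illustrative only): (mult:SI x (const_int 8))
	 has wi::exact_log2 (8) == 3, so it becomes
	 (ashift:SI x (const_int 3)); a non-power-of-two constant makes
	 exact_log2 return -1 and the multiply is left alone.  */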
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
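      /* Worked example (illustrative only): for ((x & 0x0f) | 0x0c) the
	 bits 0x0c of C1 are already forced on by C2, so C1 shrinks to
	 C1 & ~C2 == 0x03 and the result is ((x & 0x03) | 0x0c), exposing
	 a smaller mask to later passes.  */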
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
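      /* Worked example (illustrative only): in SImode,

	     (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))

	 has 24 + 8 == 32 == GET_MODE_PRECISION (SImode), so it is
	 recognized as (rotate x (const_int 24)).  */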
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	{
	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (UINTVAL (XEXP (op0, 1))
						       & ~UINTVAL (op1),
						       mode));
	  return simplify_gen_binary (IOR, mode, tmp, op1);
	}

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
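      /* Numeric check (illustrative): when the operands share no one
	 bits, XOR and IOR agree; e.g. 0xf0 ^ 0x0f == 0xff == 0xf0 | 0x0f.
	 Rotate patterns built from shifts are usually written with IOR,
	 so this rewrite lets the rotate recognizer above fire.  */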
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
	 out bits inverted twice and not set by C.  Similarly, given
	 (xor (and (xor A B) C) D), simplify without inverting C in
	 the xor operand: (xor (and A C) (B&C)^D).
      */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (op1)
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
	{
	  enum rtx_code op = GET_CODE (op0);
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx d = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);
	  HOST_WIDE_INT dval = INTVAL (d);
	  HOST_WIDE_INT xcval;

	  if (op == IOR)
	    xcval = ~cval;
	  else
	    xcval = cval;

	  return simplify_gen_binary (XOR, mode,
				      simplify_gen_binary (op, mode, a, c),
				      gen_int_mode ((bval & xcval) ^ dval,
						    mode));
	}

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  /* Instead of computing ~A&C, we compute its negated value,
	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
						  GEN_INT (~cval));
	  if ((~cval & bval) == 0)
	    {
	      rtx na_c = NULL_RTX;
	      if (n_na_c)
		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
	      else
		{
		  /* If ~A does not simplify, don't bother: we don't
		     want to simplify 2 operations into 3, and if na_c
		     were to simplify with na, n_na_c would have
		     simplified as well.  */
		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
		  if (na)
		    na_c = simplify_gen_binary (AND, mode, na, c);
		}

	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval, mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (n_na_c == CONSTM1_RTX (mode))
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}
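      /* Worked example (illustrative only): ((x | 0x30) & 0xf0)
	 distributes to ((x & 0xf0) | 0x30), since 0x30 & 0xf0 == 0x30;
	 the constant IOR term is now outermost, matching the
	 (X & C1) | C2 canonical form handled in the IOR case above.  */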
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}

      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 1)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 1)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (tem)
		return tem;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (x)
		return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
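      /* Worked example (illustrative only): (umod:SI x (const_int 16))
	 becomes (and:SI x (const_int 15)), because for a power of two
	 the unsigned remainder is exactly the low log2 bits.  This holds
	 for UMOD but not for signed MOD of possibly negative values,
	 which is why the MOD case below does not perform it.  */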
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;

    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_PRECISION (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				    mode, op0,
				    GEN_INT (GET_MODE_PRECISION (mode)
					     - INTVAL (trueop1)));
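      /* Worked example (illustrative only): in SImode a rotate by 24
	 falls in the range [17, 31], so (rotate x 24) is canonicalized
	 to (rotatert x 8); both directions must be available, which is
	 why the guard requires HAVE_rotate and HAVE_rotatert.  */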
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

      /* Given:
	 scalar modes M1, M2
	 scalar constants c1, c2
	 size (M2) > size (M1)
	 c1 == size (M2) - size (M1)
	 optimize:
	 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
				 <low_part>)
		      (const_int <c2>))
	 to:
	 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
		    <low_part>).  */
      if (code == ASHIFTRT
	  && !VECTOR_MODE_P (mode)
	  && SUBREG_P (op0)
	  && CONST_INT_P (op1)
	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
	  && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
	      > GET_MODE_BITSIZE (mode))
	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
	      == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
		  - GET_MODE_BITSIZE (mode)))
	  && subreg_lowpart_p (op0))
	{
	  rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
			     + INTVAL (op1));
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	  tmp = simplify_gen_binary (ASHIFTRT,
				     GET_MODE (SUBREG_REG (op0)),
				     XEXP (SUBREG_REG (op0), 0),
				     tmp);
	  return simplify_gen_subreg (mode, tmp, inner_mode,
				      subreg_lowpart_offset (mode,
							     inner_mode));
	}

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT) width)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));

	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract scalar element from a vector using chain of
	     nested VEC_SELECT expressions.  When input operand is a memory
	     operand, this operation can be simplified to a simple scalar
	     load from an offseted memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select correct operand of VEC_CONCAT
		     and adjust selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }
	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Recognize the identity.  */
	  if (GET_MODE (trueop0) == mode)
	    {
	      bool maybe_ident = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (j) || INTVAL (j) != i)
		    {
		      maybe_ident = false;
		      break;
		    }
		}
	      if (maybe_ident)
		return trueop0;
	    }

	  /* If we build {a,b} then permute it, build the result directly.  */
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 0)) == mode
	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 1)) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 4 && i1 < 4);
	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_MODE (trueop0) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 2 && i1 < 2);
	      subop0 = XEXP (trueop0, i0);
	      subop1 = XEXP (trueop0, i1);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  /* If we select one half of a vec_concat, return that.  */
	  if (GET_CODE (trueop0) == VEC_CONCAT
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
	    {
	      rtx subop0 = XEXP (trueop0, 0);
	      rtx subop1 = XEXP (trueop0, 1);
	      machine_mode mode0 = GET_MODE (subop0);
	      machine_mode mode1 = GET_MODE (subop1);
	      int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
	      int l0 = GET_MODE_SIZE (mode0) / li;
	      int l1 = GET_MODE_SIZE (mode1) / li;
	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
		{
		  bool success = true;
		  for (int i = 1; i < l0; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop0;
		}
	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
		{
		  bool success = true;
		  for (int i = 1; i < l1; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop1;
		}
	    }
	}

      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size;

	      if (CONST_INT_P (XEXP (vec, 0)))
		{
		  /* vec_concat of two const_ints doesn't make sense with
		     respect to modes.  */
		  if (CONST_INT_P (XEXP (vec, 1)))
		    return 0;

		  vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
			     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
		}
	      else
		vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}

      /* If we select elements in a vec_merge that all come from the same
	 operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
	{
	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
	  if (CONST_INT_P (trueop02))
	    {
	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
	      bool all_operand0 = true;
	      bool all_operand1 = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (sel & (1 << UINTVAL (j)))
		    all_operand1 = false;
		  else
		    all_operand0 = false;
		}
	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
	    }
	}

      /* If we have two nested selects that are inverses of each
	 other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
	  && GET_MODE (XEXP (trueop0, 0)) == mode)
	{
	  rtx op0_subop1 = XEXP (trueop0, 1);
	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

	  /* Apply the outer ordering vector to the inner one.  (The inner
	     ordering vector is expressly permitted to be of a different
	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
	     then the two VEC_SELECTs cancel.  */
	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
	    {
	      rtx x = XVECEXP (trueop1, 0, i);
	      if (!CONST_INT_P (x))
		return 0;
	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
	      if (!CONST_INT_P (y) || i != INTVAL (y))
		return 0;
	    }
	  return XEXP (trueop0, 0);
	}

      return 0;
    case VEC_CONCAT:
      {
	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				 ? GET_MODE (trueop0)
				 : GET_MODE_INNER (mode));
	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				 ? GET_MODE (trueop1)
				 : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_SCALAR_INT_P (trueop0)
	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_SCALAR_INT_P (trueop1)
		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }

	/* Try to merge two VEC_SELECTs from the same vector into a single one.
	   Restrict the transformation to avoid generating a VEC_SELECT with a
	   mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may dependent upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }

  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = std::make_pair (op0, mode);
      rtx_mode_t pop1 = std::make_pair (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something and so you if you added this to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, width);
	    else if (wi::geu_p (wop1, width))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
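/* Usage sketch (illustrative, not from the original file): the wide-int
   path above is what ultimately folds, e.g.,

       rtx x = simplify_const_binary_operation (PLUS, SImode,
						GEN_INT (-1), const1_rtx);

   into const0_rtx via wi::add; the result goes through
   immed_wide_int_const so it is always in canonical form for MODE.  */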
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
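
/* Sketch of the flattening done below (added for exposition; not part
   of the original sources): (minus a (minus b c)) is expanded into the
   operand list { a/+, b/-, c/+ }; pairs are combined wherever
   simplify_binary_operation succeeds, and the survivors are reassembled
   into a chain of PLUS and MINUS at the end.  */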
static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      canonicalized |= this_neg || i != n_ops - 2;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
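
  /* Note on the early exit above (added for exposition; not part of the
     original sources): with exactly two surviving operands nothing was
     flattened, so only constant folding can still pay off.  For example
     the pair { (symbol_ref s)/+, (const_int 4)/- } is handed to
     simplify_const_binary_operation as (minus s 4).  */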
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode,
						     tem_lhs, tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }
      /* If nothing changed, fail.  */
      if (!canonicalized)
	return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = !ops[0].neg;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
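
/* Rebuild example for simplify_plus_minus (added for exposition; not
   part of the original sources): a surviving list { x/+, y/+, z/- } is
   reassembled left to right as (minus (plus x y) z); a fully negated
   list gets its first operand wrapped in NEG instead.  */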
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the two operands
   must not both be VOIDmode as well.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }
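
  /* Concrete instance of the transform above (added for exposition; not
     part of the original sources): (ltu (plus a -16) -16) becomes
     (geu a 16), the usual overflow-check idiom for unsigned addition of
     a constant.  */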
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
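
/* Usage illustration (added for exposition; not part of the original
   sources): for the constants 3 and 7 a caller passes
   CMP_LT | CMP_LTU, so comparison_result (GE, ...) selects const0_rtx
   (3 >= 7 is false) while comparison_result (LEU, ...) selects
   const_true_rtx.  */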
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOID_mode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
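
  /* Worked example for the block above (added for exposition; not part
     of the original sources): comparing the constants -1 and 1 in SImode
     yields cr = CMP_LT | CMP_GTU, because -1 < 1 as a signed value while
     0xffffffff > 1 as an unsigned value; comparison_result then resolves
     LT to const_true_rtx and LTU to const0_rtx.  */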
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
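
  /* Bounds example (added for exposition; not part of the original
     sources): if trueop0 is known to fit in 8 bits (nonzero <= 0xff),
     then mmax is at most 255, so (gtu x 300) folds to const0_rtx and
     (ltu x 300) to const_true_rtx without knowing x itself.  */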
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;
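
    /* Extraction example (added for exposition; not part of the original
       sources): (zero_extract 0x1234 8 4) with little-endian bit
       numbering shifts right by 4 and masks with 0xff, giving 0x23;
       SIGN_EXTRACT would additionally copy bit 7 of the field into all
       higher bits.  */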
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i)))
	     a (1 << i)) with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = std::make_pair (el, innermode);
	    unsigned char extend = wi::sign_mask (val);

	    for (i = 0; i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
	      / HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
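
/* Byte-level walkthrough (added for exposition; not part of the original
   sources): on a little-endian target, SImode 0x12345678 unpacks into
   the value array as 78 56 34 12; an HImode subreg at byte 0 repacks the
   first two bytes and yields 0x5678, while byte 2 yields 0x1234.  */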
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
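
  /* Folding example (added for exposition; not part of the original
     sources): (subreg:QI (subreg:HI (reg:SI x) 0) 0) collapses through
     the recursive call above into (subreg:QI (reg:SI x) 0), applying
     both byte offsets in one step.  */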
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))