/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "varasm.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))

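/* Illustrative use (a sketch, not a call site in this file): splitting a
   HOST_WIDE_INT value K into such a pair is simply
     low = (unsigned HOST_WIDE_INT) K;  high = HWI_SIGN_EXTEND (K);
   giving an all-ones high half for negative K and an all-zeros high
   half otherwise.  */
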
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}

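/* For example, negating the SImode constant -0x80000000 yields
   0x80000000, which gen_int_mode truncates back to -0x80000000: the
   correct two's-complement result for a 32-bit mode.  */
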
/* Test whether expression X is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

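/* E.g. for SImode this accepts only the constant with bit 31 set and
   all lower bits clear, i.e. the CONST_INT with value 0x80000000
   (stored sign-extended on a 64-bit host).  */
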
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}

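/* Taking QImode as an example: for values within the mode mask,
   val_signbit_known_set_p holds for 0x80 through 0xff, while
   val_signbit_known_clear_p holds for 0x00 through 0x7f.  */
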
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

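/* A typical call such as simplify_gen_binary (PLUS, SImode, x,
   const0_rtx) therefore folds straight to X instead of allocating
   (plus:SI x (const_int 0)).  */
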
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}

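/* E.g. a MEM addressing the constant-pool slot of an SFmode literal
   comes back as the CONST_DOUBLE itself, so arithmetic involving it
   can be folded at compile time.  */
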
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

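/* E.g. simplify_gen_relational (EQ, SImode, DImode, x, x) can fold to
   a constant true value without ever allocating the comparison rtx,
   provided X has no side effects.  */
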
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}

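/* A typical use is substituting a known value for a register and
   letting the result fold, e.g. simplify_replace_rtx (cond, reg,
   const0_rtx) when REG is known to be zero at this point.  */
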
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (enum machine_mode mode, rtx op,
                     enum machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      enum machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
#ifdef WORD_REGISTER_OPERATIONS
      && precision >= BITS_PER_WORD
#endif
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}

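/* A concrete instance of the shift rules above:
     (truncate:QI (lshiftrt:HI (sign_extend:HI (x:QI)) (const_int 2)))
   satisfies 2 * precision <= op_precision and C < precision, so it
   becomes (ashiftrt:QI (x:QI) (const_int 2)).  */
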
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

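/* E.g. simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode)
   returns (const_int -5) via the constant folder, while a
   non-constant (neg:SI (neg:SI x)) is handled by the cases below.  */
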
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned) significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x);
         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>).  */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
          && GET_MODE_PRECISION (GET_MODE (op))
             < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
             <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (mode)
             >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (mode)
              == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
                                     GET_MODE (SUBREG_REG (op)));
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      enum machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE but a lot of
         upstream callers expect that this function never fails to
         simplify something, so if you added this to the test above
         the code would die later anyway.  If this assert happens,
         you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
        {
        case NOT:
          result = wi::bit_not (op0);
          break;

        case NEG:
          result = wi::neg (op0);
          break;

        case ABS:
          result = wi::abs (op0);
          break;

        case FFS:
          result = wi::shwi (wi::ffs (op0), mode);
          break;

        case CLZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::clz (op0);
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case CLRSB:
          result = wi::shwi (wi::clrsb (op0), mode);
          break;

        case CTZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::ctz (op0);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case POPCOUNT:
          result = wi::shwi (wi::popcount (op0), mode);
          break;

        case PARITY:
          result = wi::shwi (wi::parity (op0), mode);
          break;

        case BSWAP:
          result = wide_int (op0).bswap ();
          break;

        case TRUNCATE:
        case ZERO_EXTEND:
          result = wide_int::from (op0, width, UNSIGNED);
          break;

        case SIGN_EXTEND:
          result = wide_int::from (op0, width, SIGNED);
          break;

        case SQRT:
        default:
          return 0;
        }

      return immed_wide_int_const (result, mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          return 0;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
         things before making this call.  */
      bool fail;

      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          wmax = wi::max_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmax, SIGNED);
          if (REAL_VALUES_LESS (t, x))
            return immed_wide_int_const (wmax, mode);

          /* Test against the signed lower bound.  */
          wmin = wi::min_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmin, SIGNED);
          if (REAL_VALUES_LESS (x, t))
            return immed_wide_int_const (wmin, mode);

          return immed_wide_int_const (real_to_integer (&x, &fail, width),
                                       mode);

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          wmax = wi::max_value (width, UNSIGNED);
          real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
          if (REAL_VALUES_LESS (t, x))
            return immed_wide_int_const (wmax, mode);

          return immed_wide_int_const (real_to_integer (&x, &fail, width),
                                       mode);

        default:
          gcc_unreachable ();
        }
    }

  return 0;
}

/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}

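/* E.g. in SImode, (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))), exposing the AND to the
   usual mask simplifications.  */
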
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}

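/* E.g. "(x + 4) + 8" reaches the first reassociation attempt above as
   "(plus (plus x 4) 8)" and comes back as "(plus x 12)" once the
   inner constants fold.  */
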
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}

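/* E.g. simplify_binary_operation (MULT, SImode, x, const1_rtx)
   returns X itself, and (plus:SI (const_int 2) (const_int 3)) folds
   to (const_int 5) through the constant path.  */
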

/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = std::make_pair (XEXP (rhs, 1), mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (std::make_pair (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
	}

      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
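      /* E.g. in SImode, (ior (ashift x (const_int 24))
	 (lshiftrt x (const_int 8))) has 24 + 8 == 32, the mode
	 precision, and is rewritten as (rotate x (const_int 24)).  */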
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
	      + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	{
	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (UINTVAL (XEXP (op0, 1))
						       & ~UINTVAL (op1),
						       mode));
	  return simplify_gen_binary (IOR, mode, tmp, op1);
	}

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
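      /* E.g. in QImode, (xor x (const_int -128)) becomes
	 (plus x (const_int -128)): XORing and adding the sign bit
	 coincide modulo 2**8, since the carry out of the top bit
	 is discarded.  */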
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  rtx na_c
	    = simplify_binary_operation (AND, mode,
					 simplify_gen_unary (NOT, mode, a, mode),
					 c);
	  if ((~cval & bval) == 0)
	    {
	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval, mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (na_c == const0_rtx)
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  enum machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
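      /* For instance, with M == 0xff and N == 0x1ff we have
	 (N & M) == M, so ((a & 0x1ff) + b) & 0xff can drop the inner
	 mask and become (a + b) & 0xff: the low eight bits of the sum
	 do not depend on the bits that N clears.  */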
      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && op0 == XEXP (XEXP (op1, 0), 0))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && op1 == XEXP (XEXP (op0, 0), 0))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
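      /* E.g. (udiv x (const_int 8)) becomes
	 (lshiftrt x (const_int 3)).  This is only valid for UDIV:
	 signed division by a power of two must round towards zero,
	 which a plain arithmetic shift does not do for negative
	 operands.  */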
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (tem)
		return tem;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (x)
		return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;

    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
		       GET_MODE_BITSIZE (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				    mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
							- INTVAL (trueop1)));
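      /* E.g. in SImode, (rotate x (const_int 30)) is canonicalized
	 to (rotatert x (const_int 2)).  */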
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
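      /* E.g. if CLZ of zero is defined to be the mode precision (32 in
	 SImode), then (lshiftrt (clz:SI x) (const_int 5)) is 1 exactly
	 when the CLZ result is 32, i.e. when X is zero, so it becomes
	 (eq x (const_int 0)).  */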
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));

	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract a scalar element from a vector using a chain
	     of nested VEC_SELECT expressions.  When the input operand is
	     a memory operand, this operation can be simplified to a
	     simple scalar load from an offsetted memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      enum machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  enum machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out the number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select the correct operand of VEC_CONCAT
		     and adjust the selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }

	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Recognize the identity.  */
	  if (GET_MODE (trueop0) == mode)
	    {
	      bool maybe_ident = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (j) || INTVAL (j) != i)
		    {
		      maybe_ident = false;
		      break;
		    }
		}
	      if (maybe_ident)
		return trueop0;
	    }
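	  /* E.g. (vec_select:V4SI x:V4SI (parallel [0 1 2 3])) selects
	     every element of X in order and is just X itself.  */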
	  /* If we build {a,b} then permute it, build the result directly.  */
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 0)) == mode
	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 1)) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 4 && i1 < 4);
	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }
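	  /* E.g. selecting (parallel [3 0]) from
	     (vec_concat (vec_concat a b) (vec_concat c d)) yields
	     (vec_concat d a) without materializing the intermediate
	     four-element vector.  */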
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_MODE (trueop0) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 2 && i1 < 2);
	      subop0 = XEXP (trueop0, i0);
	      subop1 = XEXP (trueop0, i1);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  if (XVECLEN (trueop1, 0) == 1
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && GET_CODE (trueop0) == VEC_CONCAT)
	    {
	      rtx vec = trueop0;
	      int offset = INTVAL (XVECEXP (trueop1, 0, 0))
			   * GET_MODE_SIZE (mode);

	      /* Try to find the element in the VEC_CONCAT.  */
	      while (GET_MODE (vec) != mode
		     && GET_CODE (vec) == VEC_CONCAT)
		{
		  HOST_WIDE_INT vec_size
		    = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
		  if (offset < vec_size)
		    vec = XEXP (vec, 0);
		  else
		    {
		      offset -= vec_size;
		      vec = XEXP (vec, 1);
		    }
		  vec = avoid_constant_pool_reference (vec);
		}

	      if (GET_MODE (vec) == mode)
		return vec;
	    }
	}

      /* If we select elements in a vec_merge that all come from the same
	 operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
	{
	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
	  if (CONST_INT_P (trueop02))
	    {
	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
	      bool all_operand0 = true;
	      bool all_operand1 = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (sel & (1 << UINTVAL (j)))
		    all_operand1 = false;
		  else
		    all_operand0 = false;
		}
	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
		return simplify_gen_binary (VEC_SELECT, mode,
					    XEXP (op0, 0), op1);
	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
		return simplify_gen_binary (VEC_SELECT, mode,
					    XEXP (op0, 1), op1);
	    }
	}

      return 0;

    case VEC_CONCAT:
      {
	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				      ? GET_MODE (trueop0)
				      : GET_MODE_INNER (mode));
	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				      ? GET_MODE (trueop1)
				      : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_SCALAR_INT_P (trueop0)
	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_SCALAR_INT_P (trueop1)
		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
	/* Try to merge two VEC_SELECTs from the same vector into a single one.
	   Restrict the transformation to avoid generating a VEC_SELECT with a
	   mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }

	return 0;
      }

    default:
      gcc_unreachable ();
    }

  return 0;
}

rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = std::make_pair (op0, mode);
      rtx_mode_t pop1 = std::make_pair (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE, but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert happens,
	 you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, width);
	    else if (wi::geu_p (wop1, width))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
4019 /* Now simplify each pair of operands until nothing changes. */
4022 /* Insertion sort is good enough for an eight-element array. */
4023 for (i
= 1; i
< n_ops
; i
++)
4025 struct simplify_plus_minus_op_data save
;
4027 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
4033 ops
[j
+ 1] = ops
[j
];
4034 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
4039 for (i
= n_ops
- 1; i
> 0; i
--)
4040 for (j
= i
- 1; j
>= 0; j
--)
4042 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4043 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4045 if (lhs
!= 0 && rhs
!= 0)
4047 enum rtx_code ncode
= PLUS
;
4053 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4055 else if (swap_commutative_operands_p (lhs
, rhs
))
4056 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4058 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4059 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4061 rtx tem_lhs
, tem_rhs
;
4063 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4064 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4065 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
4067 if (tem
&& !CONSTANT_P (tem
))
4068 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4071 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4073 /* Reject "simplifications" that just wrap the two
4074 arguments in a CONST. Failure to do so can result
4075 in infinite recursion with simplify_binary_operation
4076 when it calls us to simplify CONST operations. */
4078 && ! (GET_CODE (tem
) == CONST
4079 && GET_CODE (XEXP (tem
, 0)) == ncode
4080 && XEXP (XEXP (tem
, 0), 0) == lhs
4081 && XEXP (XEXP (tem
, 0), 1) == rhs
))
4084 if (GET_CODE (tem
) == NEG
)
4085 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4086 if (CONST_INT_P (tem
) && lneg
)
4087 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4091 ops
[j
].op
= NULL_RTX
;
4098 /* If nothing changed, fail. */
4102 /* Pack all the operands to the lower-numbered entries. */
4103 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4113 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4115 && CONST_INT_P (ops
[1].op
)
4116 && CONSTANT_P (ops
[0].op
)
4118 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4120 /* We suppressed creation of trivial CONST expressions in the
4121 combination loop to avoid recursion. Create one manually now.
4122 The combination loop should have ensured that there is exactly
4123 one CONST_INT, and the sort will have ensured that it is last
4124 in the array and that any other constant will be next-to-last. */
4127 && CONST_INT_P (ops
[n_ops
- 1].op
)
4128 && CONSTANT_P (ops
[n_ops
- 2].op
))
4130 rtx value
= ops
[n_ops
- 1].op
;
4131 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4132 value
= neg_const_int (mode
, value
);
4133 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4138 /* Put a non-negated operand first, if possible. */
4140 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4143 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4152 /* Now make the result by performing the requested operations. */
4154 for (i
= 1; i
< n_ops
; i
++)
4155 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4156 mode
, result
, ops
[i
].op
);
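/* Editorial note -- an illustrative example, not part of the original
   sources: given a chain such as

     (plus:SI (plus:SI (reg:SI a) (const_int 4)) (const_int -4))

   the caller's scan collects the operands { a, +4, -4 } into OPS[]; the
   combination loop above folds the two constants away, and the result
   loop then rebuilds just (reg:SI a).  */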
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
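/* For instance (illustrative, not from the original comments), the
   predicate above accepts (plus:SI (reg:SI a) (reg:SI b)),
   (minus:SI (reg:SI a) (reg:SI b)) and a wrapped constant form such as
   (const:SI (plus:SI (symbol_ref:SI "x") (const_int 8))), but rejects a
   plain (reg:SI a), which simplify_plus_minus could not improve.  */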
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}
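/* A sketch of the usual flow (illustrative only): a caller such as the
   combiner may ask

     simplify_relational_operation (EQ, SImode, SImode,
                                    const1_rtx, const1_rtx);

   simplify_const_relational_operation proves the comparison a tautology,
   and since SImode is neither a scalar float nor a vector mode the raw
   const_true_rtx is returned unchanged.  */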
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
                                    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
         simplification case between:
            A + B == C  <--->  C - B == A,
         where A, B, and C are all constants with non-simplifiable expressions,
         usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
          && CONSTANT_P (x)
          && rtx_equal_p (c, XEXP (tem, 1)))
        return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
           : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_unary (BSWAP, cmp_mode,
                                                        op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}
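/* Worked example for the PLUS rules above (illustrative, not from the
   original comments): the overflow check

     (ltu:SI (plus:SI (reg:SI a) (const_int 4)) (const_int 4))

   matches the (LTU (PLUS a C) C) pattern and is rewritten as

     (geu:SI (reg:SI a) (const_int -4))

   which needs no PLUS and is the canonical carry-out test.  */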
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
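/* Example (illustrative): comparing -1 with 1 yields KNOWN_RESULTS of
   CMP_LT | CMP_GTU -- signed less-than, but unsigned greater-than, since
   -1 is the all-ones bit pattern.  comparison_result then maps LT to
   const_true_rtx and LTU to const0_rtx for the same KNOWN_RESULTS.  */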
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
         largest int representable on the target is as good as
         infinite.  */
      enum machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
          cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }

  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }

  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
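/* Worked example (illustrative): with both operands constant, say

     simplify_const_relational_operation (GT, SImode,
                                          GEN_INT (3), GEN_INT (5)),

   the wide-int branch above computes CMP_LT | CMP_LTU and
   comparison_result maps GT to const0_rtx: the comparison is a
   contradiction and every use of it can be deleted.  */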
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
          else
            val >>= op2val;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
                     != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
            }

          return gen_int_mode (val, mode);
        }
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              else
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
          unsigned HOST_WIDE_INT mask;
          if (n_elts == HOST_BITS_PER_WIDE_INT)
            mask = -1;
          else
            mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

          if (!(sel & mask) && !side_effects_p (op0))
            return op1;
          if ((sel & mask) == mask && !side_effects_p (op1))
            return op0;

          rtx trueop0 = avoid_constant_pool_reference (op0);
          rtx trueop1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (trueop0) == CONST_VECTOR
              && GET_CODE (trueop1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
                                    ? CONST_VECTOR_ELT (trueop0, i)
                                    : CONST_VECTOR_ELT (trueop1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }

          /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
             if no element from a appears in the result.  */
          if (GET_CODE (op0) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op0, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
                  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 1), op1, op2);
                  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 0), op1, op2);
                }
            }
          if (GET_CODE (op1) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op1, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
                  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 1), op2);
                  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 0), op2);
                }
            }
        }

      if (rtx_equal_p (op0, op1)
          && !side_effects_p (op2) && !side_effects_p (op1))
        return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
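/* Example for the IF_THEN_ELSE rules above (illustrative): assuming a
   mode where NaNs and signed zeros are irrelevant,

     (if_then_else (ne (reg a) (reg b)) (reg a) (reg b))

   folds to (reg a): whenever the arms differ the NE arm is chosen, and
   whenever they are equal either answer is (reg a) anyway.  */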
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
                     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_WIDE_INT:
          {
            rtx_mode_t val = std::make_pair (el, innermode);
            unsigned char extend = wi::sign_mask (val);

            for (i = 0; i < elem_bitsize; i += value_bit)
              *vp++ = wi::extract_uhwi (val, i, value_bit);
            for (; i < elem_bitsize; i += value_bit)
              *vp++ = extend;
          }
          break;

        case CONST_DOUBLE:
          if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
            {
              unsigned char extend = 0;
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }

              if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
                extend = -1;
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = extend;
            }
          else
            {
              /* This is big enough for anything on the platform.  */
              long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            int u;
            int base = 0;
            int units
              = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
                / HOST_BITS_PER_WIDE_INT;
            HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
            wide_int r;

            for (u = 0; u < units; u++)
              {
                unsigned HOST_WIDE_INT buf = 0;
                for (i = 0;
                     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
                     i += value_bit)
                  buf |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;

                tmp[u] = buf;
                base += HOST_BITS_PER_WIDE_INT;
              }
            gcc_assert (GET_MODE_PRECISION (outer_submode)
                        <= MAX_BITSIZE_MODE_ANY_INT);
            r = wide_int::from_array (tmp, units,
                                      GET_MODE_PRECISION (outer_submode));
            elems[elem] = immed_wide_int_const (r, outer_submode);
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT) (*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT) (*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
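/* Worked example (illustrative; assumes a little-endian target): taking
   (subreg:QI (const_int 0x1234) 0) with an HImode inner operand unpacks
   the constant into the bytes { 0x34, 0x12 }, selects byte 0, and repacks
   it as (const_int 0x34).  On a big-endian target the same SUBREG_BYTE
   selects the most significant end instead.  */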
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that can not
             grok partial register anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }

  return NULL_RTX;
}
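/* Example for the nested-SUBREG rule above (illustrative): the two mode
   changes in

     (subreg:QI (subreg:HI (reg:SI r) 0) 0)

   collapse into one, giving (subreg:QI (reg:SI r) 0); and for the
   ZERO_EXTEND rule, (subreg:SI (zero_extend:DI (reg:SI x)) 4) on a
   little-endian target reads only zero bits, so it folds to
   (const_int 0).  */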
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
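/* Typical usage (a hypothetical sketch, not from the original sources):
   extracting the low part of a wider value,

     rtx lo = simplify_gen_subreg (SImode, x, DImode,
                                   subreg_lowpart_offset (SImode, DImode));

   returns a folded rtx when simplify_subreg succeeds, a fresh
   (subreg:SI x ...) when the subreg is merely valid, and NULL_RTX when
   no correct subreg can be formed.  */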
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}