/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
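
/* For illustration: HWI_SIGN_EXTEND (-5) evaluates to (HOST_WIDE_INT) -1
   (all bits set) and HWI_SIGN_EXTEND (5) to 0, so the (low, high) pair for
   the signed value -5 is (-5, -1) while the pair for 5 is (5, 0).  */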
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
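
/* For illustration: because gen_int_mode truncates, negating the most
   negative QImode constant wraps around, so neg_const_int (QImode,
   GEN_INT (-128)) yields (const_int -128) again.  */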
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
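
/* For illustration: in a 32-bit SImode the only value accepted above is the
   sign-bit mask 0x80000000; any other immediate makes mode_signbit_p
   return false.  */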
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
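
/* For illustration: in QImode the sign-bit mask is 0x80, so
   val_signbit_p (QImode, 0x80), val_signbit_known_set_p (QImode, 0xff)
   and val_signbit_known_clear_p (QImode, 0x7f) all return true.  */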
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
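
/* For illustration: simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   normally folds to X itself, while a combination that cannot be folded
   simply comes back as the freshly generated (plus:SI x y) rtx.  */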
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
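
/* For illustration: a MEM whose address is a SYMBOL_REF into the constant
   pool holding (const_double 1.5) is replaced by that (const_double 1.5)
   here, so later folding can work on the constant directly.  */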
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        case ARRAY_RANGE_REF:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (REG_P (XEXP (newx, 0)) || GET_CODE (XEXP (newx, 0)) == SYMBOL_REF)
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
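
/* For illustration: simplify_gen_relational (EQ, SImode, SImode, x, x) can
   fold to the constant "true" value when X has no side effects, whereas an
   undecidable comparison is returned as a plain (eq:SI x y).  */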
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
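
/* For illustration: simplify_replace_rtx applied to (plus:SI reg1 reg2)
   with OLD_RTX = reg2 and NEW_RTX = const0_rtx substitutes the operand and
   then simplifies, so the result collapses to just reg1.  */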
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (mode, XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
                                - GET_MODE_PRECISION (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>)  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>)  */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                         ? SIGN_EXTEND : ZERO_EXTEND,
                                         mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
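
/* For illustration of the cases above: (not (not x)) and (neg (neg x)) both
   collapse to x, (not (plus x -1)) becomes (neg x), and (not (eq x y))
   becomes (ne x y) when the result mode is BImode or STORE_FLAG_VALUE
   is -1.  */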
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */

rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
          || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
        /* We should never get a negative number.  */
        gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          arg0 &= GET_MODE_MASK (mode);
          val = ffs_hwi (arg0);
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
          break;

        case CLRSB:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            val = GET_MODE_PRECISION (mode) - 1;
          else if (arg0 >= 0)
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
          else if (arg0 < 0)
            val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_PRECISION (mode);
            }
          else
            val = ctz_hwi (arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & GET_MODE_MASK (op_mode);
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          op_width = GET_MODE_PRECISION (op_mode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (op_width < HOST_BITS_PER_WIDE_INT)
            {
              val = arg0 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, val))
                val |= ~GET_MODE_MASK (op_mode);
            }
          else
            return 0;
          break;

        case FLOAT_TRUNCATE:
          return 0;

        default:
          return 0;
        }

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_DOUBLE_INT
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 != 0)
            lv = ffs_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
          else
            lv = 0;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_PRECISION (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = ctz_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_PRECISION (mode);
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (op_width > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || op_width > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, lv))
                lv |= ~GET_MODE_MASK (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;

        case ABS:
          d = real_value_abs (&d);
          break;

        case NEG:
          d = real_value_negate (&d);
          break;

        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;

        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;

        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;

        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (unsigned HOST_WIDE_INT) (-1)
                   << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == HOST_BITS_PER_DOUBLE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
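
/* For illustration: simplify_const_unary_operation folds
   (neg:SI (const_int 5)) to (const_int -5) and (popcount:SI (const_int 7))
   to (const_int 3) through the CONST_INT path above.  */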
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
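
/* For illustration: the canonicalization above rewrites
   (plus (plus x (const_int 3)) (const_int 4)) as (plus x (const_int 7)),
   reassociating so that the two constants meet and fold.  */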
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
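
/* For illustration: simplify_binary_operation (MULT, SImode, GEN_INT (6),
   reg) first swaps the operands so the constant comes second, and
   (mult:SI (const_int 6) (const_int 7)) folds to (const_int 42) via
   simplify_const_binary_operation.  */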
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
1926 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1927 when x is NaN, infinite, or finite and nonzero. They aren't
1928 when x is -0 and the rounding mode is not towards -infinity,
1929 since (-0) + 0 is then 0. */
1930 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1933 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1934 transformations are safe even for IEEE. */
1935 if (GET_CODE (op0
) == NEG
)
1936 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1937 else if (GET_CODE (op1
) == NEG
)
1938 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1940 /* (~a) + 1 -> -a */
1941 if (INTEGRAL_MODE_P (mode
)
1942 && GET_CODE (op0
) == NOT
1943 && trueop1
== const1_rtx
)
1944 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1946 /* Handle both-operands-constant cases. We can only add
1947 CONST_INTs to constants since the sum of relocatable symbols
1948 can't be handled by most assemblers. Don't add CONST_INT
1949 to CONST_INT since overflow won't be computed properly if wider
1950 than HOST_BITS_PER_WIDE_INT. */
1952 if ((GET_CODE (op0
) == CONST
1953 || GET_CODE (op0
) == SYMBOL_REF
1954 || GET_CODE (op0
) == LABEL_REF
)
1955 && CONST_INT_P (op1
))
1956 return plus_constant (mode
, op0
, INTVAL (op1
));
1957 else if ((GET_CODE (op1
) == CONST
1958 || GET_CODE (op1
) == SYMBOL_REF
1959 || GET_CODE (op1
) == LABEL_REF
)
1960 && CONST_INT_P (op0
))
1961 return plus_constant (mode
, op1
, INTVAL (op0
));
1963 /* See if this is something like X * C - X or vice versa or
1964 if the multiplication is written as a shift. If so, we can
1965 distribute and make a new multiply, shift, or maybe just
1966 have X (if C is 2 in the example above). But don't make
1967 something more expensive than we had before. */
1969 if (SCALAR_INT_MODE_P (mode
))
1971 double_int coeff0
, coeff1
;
1972 rtx lhs
= op0
, rhs
= op1
;
1974 coeff0
= double_int_one
;
1975 coeff1
= double_int_one
;
1977 if (GET_CODE (lhs
) == NEG
)
1979 coeff0
= double_int_minus_one
;
1980 lhs
= XEXP (lhs
, 0);
1982 else if (GET_CODE (lhs
) == MULT
1983 && CONST_INT_P (XEXP (lhs
, 1)))
1985 coeff0
= shwi_to_double_int (INTVAL (XEXP (lhs
, 1)));
1986 lhs
= XEXP (lhs
, 0);
1988 else if (GET_CODE (lhs
) == ASHIFT
1989 && CONST_INT_P (XEXP (lhs
, 1))
1990 && INTVAL (XEXP (lhs
, 1)) >= 0
1991 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1993 coeff0
= double_int_setbit (double_int_zero
,
1994 INTVAL (XEXP (lhs
, 1)));
1995 lhs
= XEXP (lhs
, 0);
1998 if (GET_CODE (rhs
) == NEG
)
2000 coeff1
= double_int_minus_one
;
2001 rhs
= XEXP (rhs
, 0);
2003 else if (GET_CODE (rhs
) == MULT
2004 && CONST_INT_P (XEXP (rhs
, 1)))
2006 coeff1
= shwi_to_double_int (INTVAL (XEXP (rhs
, 1)));
2007 rhs
= XEXP (rhs
, 0);
2009 else if (GET_CODE (rhs
) == ASHIFT
2010 && CONST_INT_P (XEXP (rhs
, 1))
2011 && INTVAL (XEXP (rhs
, 1)) >= 0
2012 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2014 coeff1
= double_int_setbit (double_int_zero
,
2015 INTVAL (XEXP (rhs
, 1)));
2016 rhs
= XEXP (rhs
, 0);
2019 if (rtx_equal_p (lhs
, rhs
))
2021 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
2024 bool speed
= optimize_function_for_speed_p (cfun
);
2026 val
= double_int_add (coeff0
, coeff1
);
2027 coeff
= immed_double_int_const (val
, mode
);
2029 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2030 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2035 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2036 if ((CONST_INT_P (op1
)
2037 || GET_CODE (op1
) == CONST_DOUBLE
)
2038 && GET_CODE (op0
) == XOR
2039 && (CONST_INT_P (XEXP (op0
, 1))
2040 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
2041 && mode_signbit_p (mode
, op1
))
2042 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2043 simplify_gen_binary (XOR
, mode
, op1
,
2046 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2047 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2048 && GET_CODE (op0
) == MULT
2049 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2053 in1
= XEXP (XEXP (op0
, 0), 0);
2054 in2
= XEXP (op0
, 1);
2055 return simplify_gen_binary (MINUS
, mode
, op1
,
2056 simplify_gen_binary (MULT
, mode
,
2060 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2061 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2063 if (COMPARISON_P (op0
)
2064 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2065 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2066 && (reversed
= reversed_comparison (op0
, mode
)))
2068 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2070 /* If one of the operands is a PLUS or a MINUS, see if we can
2071 simplify this by the associative law.
2072 Don't use the associative law for floating point.
2073 The inaccuracy makes it nonassociative,
2074 and subtle programs can break if operations are associated. */
2076 if (INTEGRAL_MODE_P (mode
)
2077 && (plus_minus_operand_p (op0
)
2078 || plus_minus_operand_p (op1
))
2079 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2082 /* Reassociate floating point addition only when the user
2083 specifies associative math operations. */
2084 if (FLOAT_MODE_P (mode
)
2085 && flag_associative_math
)
2087 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2094 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2095 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2096 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2097 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2099 rtx xop00
= XEXP (op0
, 0);
2100 rtx xop10
= XEXP (op1
, 0);
2103 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2105 if (REG_P (xop00
) && REG_P (xop10
)
2106 && GET_MODE (xop00
) == GET_MODE (xop10
)
2107 && REGNO (xop00
) == REGNO (xop10
)
2108 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2109 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
2116 /* We can't assume x-x is 0 even with non-IEEE floating point,
2117 but since it is zero except in very strange circumstances, we
2118 will treat it as zero with -ffinite-math-only. */
2119 if (rtx_equal_p (trueop0
, trueop1
)
2120 && ! side_effects_p (op0
)
2121 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2122 return CONST0_RTX (mode
);
2124 /* Change subtraction from zero into negation. (0 - x) is the
2125 same as -x when x is NaN, infinite, or finite and nonzero.
2126 But if the mode has signed zeros, and does not round towards
2127 -infinity, then 0 - 0 is 0, not -0. */
2128 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2129 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2131 /* (-1 - a) is ~a. */
2132 if (trueop0
== constm1_rtx
)
2133 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2135 /* Subtracting 0 has no effect unless the mode has signed zeros
2136 and supports rounding towards -infinity. In such a case,
2138 if (!(HONOR_SIGNED_ZEROS (mode
)
2139 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2140 && trueop1
== CONST0_RTX (mode
))
2143 /* See if this is something like X * C - X or vice versa or
2144 if the multiplication is written as a shift. If so, we can
2145 distribute and make a new multiply, shift, or maybe just
2146 have X (if C is 2 in the example above). But don't make
2147 something more expensive than we had before. */
      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, negcoeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  negcoeff1 = double_int_minus_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = double_int_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1 = double_int_setbit (double_int_zero,
					     INTVAL (XEXP (rhs, 1)));
	      negcoeff1 = double_int_neg (negcoeff1);
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, negcoeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}
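      /* For example, the block above rewrites (minus (mult x (const_int 3)) x)
	 as (mult x (const_int 2)) when the new multiply is no more expensive
	 than the original expression.  */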
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_INT_P (op1)
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));
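      /* For example, the rewrite above turns (minus x (const_int 4)) into
	 (plus x (const_int -4)).  */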
      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;
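      /* For example, (minus (const_int 1) (eq x y)) becomes (ne x y)
	 when STORE_FLAG_VALUE is 1 and the comparison can be reversed.  */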
      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}
      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);
      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;
      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && (GET_MODE (trueop1) == VOIDmode
	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
	  && GET_MODE (op0) == mode
	  && CONST_DOUBLE_LOW (trueop1) == 0
	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
	  && (val < HOST_BITS_PER_DOUBLE_INT - 1
	      || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
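      /* For example, (mult x (const_int 8)) becomes
	 (ashift x (const_int 3)).  */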
      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}
      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
	return op1;
      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
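      /* For example, (ior (and x (const_int 6)) (const_int 3)) is rewritten
	 as (ior (and x (const_int 4)) (const_int 3)) by the last rule above.  */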
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));
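      /* For example, in SImode (ior (ashift r (const_int 24))
	 (lshiftrt r (const_int 8))) becomes (rotate r (const_int 24)).  */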
      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (UINTVAL (XEXP (op0, 1))
						    & ~UINTVAL (op1))),
				    op1);
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == PLUS
	  && (CONST_INT_P (XEXP (op0, 1))
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  rtx na_c
	    = simplify_binary_operation (AND, mode,
					 simplify_gen_unary (NOT, mode, a, mode),
					 c);
	  if ((~cval & bval) == 0)
	    {
	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    GEN_INT (~bval & cval));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (na_c == const0_rtx)
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    GEN_INT (~cval & bval));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      GEN_INT (~bval & cval));
		}
	    }
	}
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;
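      /* For example, (xor (eq x y) (const_int 1)) becomes (ne x y)
	 when STORE_FLAG_VALUE is 1.  */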
      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}
      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  enum machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);
      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
			 == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
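      /* For example, (and (plus (ior a (const_int 65280)) b) (const_int 255))
	 becomes (and (plus a b) (const_int 255)).  */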
      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && op0 == XEXP (XEXP (op1, 0), 0))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && op1 == XEXP (XEXP (op0, 0), 0))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
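      /* For example, (udiv x (const_int 16)) becomes
	 (lshiftrt x (const_int 4)).  */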
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
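      /* For example, (umod x (const_int 8)) becomes
	 (and x (const_int 7)).  */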
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;

    case ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
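      /* For example, when CLZ of zero is defined to be 32 in SImode,
	 (lshiftrt (clz x) (const_int 5)) becomes (eq x (const_int 0)).  */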
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
3120 /* ??? There are simplifications that can be done. */
3124 if (!VECTOR_MODE_P (mode
))
3126 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3127 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3128 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3129 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3130 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3132 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3133 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3136 /* Extract a scalar element from a nested VEC_SELECT expression
3137 (with optional nested VEC_CONCAT expression). Some targets
3138 (i386) extract scalar element from a vector using chain of
3139 nested VEC_SELECT expressions. When input operand is a memory
3140 operand, this operation can be simplified to a simple scalar
3141 load from an offseted memory address. */
3142 if (GET_CODE (trueop0
) == VEC_SELECT
)
3144 rtx op0
= XEXP (trueop0
, 0);
3145 rtx op1
= XEXP (trueop0
, 1);
3147 enum machine_mode opmode
= GET_MODE (op0
);
3148 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3149 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3151 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3157 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3158 gcc_assert (i
< n_elts
);
3160 /* Select element, pointed by nested selector. */
3161 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3163 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3164 if (GET_CODE (op0
) == VEC_CONCAT
)
3166 rtx op00
= XEXP (op0
, 0);
3167 rtx op01
= XEXP (op0
, 1);
3169 enum machine_mode mode00
, mode01
;
3170 int n_elts00
, n_elts01
;
3172 mode00
= GET_MODE (op00
);
3173 mode01
= GET_MODE (op01
);
3175 /* Find out number of elements of each operand. */
3176 if (VECTOR_MODE_P (mode00
))
3178 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3179 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3184 if (VECTOR_MODE_P (mode01
))
3186 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3187 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3192 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3194 /* Select correct operand of VEC_CONCAT
3195 and adjust selector. */
3196 if (elem
< n_elts01
)
3207 vec
= rtvec_alloc (1);
3208 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3210 tmp
= gen_rtx_fmt_ee (code
, mode
,
3211 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3214 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3215 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3216 return XEXP (trueop0
, 0);
3220 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3221 gcc_assert (GET_MODE_INNER (mode
)
3222 == GET_MODE_INNER (GET_MODE (trueop0
)));
3223 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3225 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3227 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3228 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3229 rtvec v
= rtvec_alloc (n_elts
);
3232 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3233 for (i
= 0; i
< n_elts
; i
++)
3235 rtx x
= XVECEXP (trueop1
, 0, i
);
3237 gcc_assert (CONST_INT_P (x
));
3238 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3242 return gen_rtx_CONST_VECTOR (mode
, v
);
3245 /* If we build {a,b} then permute it, build the result directly. */
3246 if (XVECLEN (trueop1
, 0) == 2
3247 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3248 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3249 && GET_CODE (trueop0
) == VEC_CONCAT
3250 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3251 && GET_MODE (XEXP (trueop0
, 0)) == mode
3252 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3253 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3255 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3256 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3259 gcc_assert (i0
< 4 && i1
< 4);
3260 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3261 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3263 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3267 if (XVECLEN (trueop1
, 0) == 1
3268 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3269 && GET_CODE (trueop0
) == VEC_CONCAT
)
3272 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3274 /* Try to find the element in the VEC_CONCAT. */
3275 while (GET_MODE (vec
) != mode
3276 && GET_CODE (vec
) == VEC_CONCAT
)
3278 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3279 if (offset
< vec_size
)
3280 vec
= XEXP (vec
, 0);
3284 vec
= XEXP (vec
, 1);
3286 vec
= avoid_constant_pool_reference (vec
);
3289 if (GET_MODE (vec
) == mode
)
3296 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3297 ? GET_MODE (trueop0
)
3298 : GET_MODE_INNER (mode
));
3299 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3300 ? GET_MODE (trueop1
)
3301 : GET_MODE_INNER (mode
));
3303 gcc_assert (VECTOR_MODE_P (mode
));
3304 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3305 == GET_MODE_SIZE (mode
));
3307 if (VECTOR_MODE_P (op0_mode
))
3308 gcc_assert (GET_MODE_INNER (mode
)
3309 == GET_MODE_INNER (op0_mode
));
3311 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3313 if (VECTOR_MODE_P (op1_mode
))
3314 gcc_assert (GET_MODE_INNER (mode
)
3315 == GET_MODE_INNER (op1_mode
));
3317 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3319 if ((GET_CODE (trueop0
) == CONST_VECTOR
3320 || CONST_INT_P (trueop0
)
3321 || GET_CODE (trueop0
) == CONST_DOUBLE
)
3322 && (GET_CODE (trueop1
) == CONST_VECTOR
3323 || CONST_INT_P (trueop1
)
3324 || GET_CODE (trueop1
) == CONST_DOUBLE
))
3326 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3327 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3328 rtvec v
= rtvec_alloc (n_elts
);
3330 unsigned in_n_elts
= 1;
3332 if (VECTOR_MODE_P (op0_mode
))
3333 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3334 for (i
= 0; i
< n_elts
; i
++)
3338 if (!VECTOR_MODE_P (op0_mode
))
3339 RTVEC_ELT (v
, i
) = trueop0
;
3341 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3345 if (!VECTOR_MODE_P (op1_mode
))
3346 RTVEC_ELT (v
, i
) = trueop1
;
3348 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3353 return gen_rtx_CONST_VECTOR (mode
, v
);
3366 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
3369 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
3371 unsigned int width
= GET_MODE_PRECISION (mode
);
3373 if (VECTOR_MODE_P (mode
)
3374 && code
!= VEC_CONCAT
3375 && GET_CODE (op0
) == CONST_VECTOR
3376 && GET_CODE (op1
) == CONST_VECTOR
)
3378 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3379 enum machine_mode op0mode
= GET_MODE (op0
);
3380 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3381 enum machine_mode op1mode
= GET_MODE (op1
);
3382 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3383 rtvec v
= rtvec_alloc (n_elts
);
3386 gcc_assert (op0_n_elts
== n_elts
);
3387 gcc_assert (op1_n_elts
== n_elts
);
3388 for (i
= 0; i
< n_elts
; i
++)
3390 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3391 CONST_VECTOR_ELT (op0
, i
),
3392 CONST_VECTOR_ELT (op1
, i
));
3395 RTVEC_ELT (v
, i
) = x
;
3398 return gen_rtx_CONST_VECTOR (mode
, v
);
3401 if (VECTOR_MODE_P (mode
)
3402 && code
== VEC_CONCAT
3403 && (CONST_INT_P (op0
)
3404 || GET_CODE (op0
) == CONST_DOUBLE
3405 || GET_CODE (op0
) == CONST_FIXED
)
3406 && (CONST_INT_P (op1
)
3407 || GET_CODE (op1
) == CONST_DOUBLE
3408 || GET_CODE (op1
) == CONST_FIXED
))
3410 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3411 rtvec v
= rtvec_alloc (n_elts
);
3413 gcc_assert (n_elts
>= 2);
3416 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3417 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3419 RTVEC_ELT (v
, 0) = op0
;
3420 RTVEC_ELT (v
, 1) = op1
;
3424 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3425 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3428 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3429 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3430 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3432 for (i
= 0; i
< op0_n_elts
; ++i
)
3433 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3434 for (i
= 0; i
< op1_n_elts
; ++i
)
3435 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3438 return gen_rtx_CONST_VECTOR (mode
, v
);
3441 if (SCALAR_FLOAT_MODE_P (mode
)
3442 && GET_CODE (op0
) == CONST_DOUBLE
3443 && GET_CODE (op1
) == CONST_DOUBLE
3444 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3455 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3457 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3459 for (i
= 0; i
< 4; i
++)
3476 real_from_target (&r
, tmp0
, mode
);
3477 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3481 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3484 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3485 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3486 real_convert (&f0
, mode
, &f0
);
3487 real_convert (&f1
, mode
, &f1
);
3489 if (HONOR_SNANS (mode
)
3490 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3494 && REAL_VALUES_EQUAL (f1
, dconst0
)
3495 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3498 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3499 && flag_trapping_math
3500 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3502 int s0
= REAL_VALUE_NEGATIVE (f0
);
3503 int s1
= REAL_VALUE_NEGATIVE (f1
);
3508 /* Inf + -Inf = NaN plus exception. */
3513 /* Inf - Inf = NaN plus exception. */
3518 /* Inf / Inf = NaN plus exception. */
3525 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3526 && flag_trapping_math
3527 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3528 || (REAL_VALUE_ISINF (f1
)
3529 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3530 /* Inf * 0 = NaN plus exception. */
3533 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3535 real_convert (&result
, mode
, &value
);
3537 /* Don't constant fold this floating point operation if
3538 the result has overflowed and flag_trapping_math. */
3540 if (flag_trapping_math
3541 && MODE_HAS_INFINITIES (mode
)
3542 && REAL_VALUE_ISINF (result
)
3543 && !REAL_VALUE_ISINF (f0
)
3544 && !REAL_VALUE_ISINF (f1
))
3545 /* Overflow plus exception. */
3548 /* Don't constant fold this floating point operation if the
3549 result may dependent upon the run-time rounding mode and
3550 flag_rounding_math is set, or if GCC's software emulation
3551 is unable to accurately represent the result. */
3553 if ((flag_rounding_math
3554 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3555 && (inexact
|| !real_identical (&result
, &value
)))
3558 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3562 /* We can fold some multi-word operations. */
3563 if (GET_MODE_CLASS (mode
) == MODE_INT
3564 && width
== HOST_BITS_PER_DOUBLE_INT
3565 && (CONST_DOUBLE_P (op0
) || CONST_INT_P (op0
))
3566 && (CONST_DOUBLE_P (op1
) || CONST_INT_P (op1
)))
3568 double_int o0
, o1
, res
, tmp
;
3570 o0
= rtx_to_double_int (op0
);
3571 o1
= rtx_to_double_int (op1
);
3576 /* A - B == A + (-B). */
3577 o1
= double_int_neg (o1
);
3579 /* Fall through.... */
3582 res
= double_int_add (o0
, o1
);
3586 res
= double_int_mul (o0
, o1
);
3590 if (div_and_round_double (TRUNC_DIV_EXPR
, 0,
3591 o0
.low
, o0
.high
, o1
.low
, o1
.high
,
3592 &res
.low
, &res
.high
,
3593 &tmp
.low
, &tmp
.high
))
3598 if (div_and_round_double (TRUNC_DIV_EXPR
, 0,
3599 o0
.low
, o0
.high
, o1
.low
, o1
.high
,
3600 &tmp
.low
, &tmp
.high
,
3601 &res
.low
, &res
.high
))
3606 if (div_and_round_double (TRUNC_DIV_EXPR
, 1,
3607 o0
.low
, o0
.high
, o1
.low
, o1
.high
,
3608 &res
.low
, &res
.high
,
3609 &tmp
.low
, &tmp
.high
))
3614 if (div_and_round_double (TRUNC_DIV_EXPR
, 1,
3615 o0
.low
, o0
.high
, o1
.low
, o1
.high
,
3616 &tmp
.low
, &tmp
.high
,
3617 &res
.low
, &res
.high
))
3622 res
= double_int_and (o0
, o1
);
3626 res
= double_int_ior (o0
, o1
);
3630 res
= double_int_xor (o0
, o1
);
3634 res
= double_int_smin (o0
, o1
);
3638 res
= double_int_smax (o0
, o1
);
3642 res
= double_int_umin (o0
, o1
);
3646 res
= double_int_umax (o0
, o1
);
3649 case LSHIFTRT
: case ASHIFTRT
:
3651 case ROTATE
: case ROTATERT
:
3653 unsigned HOST_WIDE_INT cnt
;
3655 if (SHIFT_COUNT_TRUNCATED
)
3658 o1
.low
&= GET_MODE_PRECISION (mode
) - 1;
3661 if (!double_int_fits_in_uhwi_p (o1
)
3662 || double_int_to_uhwi (o1
) >= GET_MODE_PRECISION (mode
))
3665 cnt
= double_int_to_uhwi (o1
);
3667 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3668 res
= double_int_rshift (o0
, cnt
, GET_MODE_PRECISION (mode
),
3670 else if (code
== ASHIFT
)
3671 res
= double_int_lshift (o0
, cnt
, GET_MODE_PRECISION (mode
),
3673 else if (code
== ROTATE
)
3674 res
= double_int_lrotate (o0
, cnt
, GET_MODE_PRECISION (mode
));
3675 else /* code == ROTATERT */
3676 res
= double_int_rrotate (o0
, cnt
, GET_MODE_PRECISION (mode
));
3684 return immed_double_int_const (res
, mode
);
3687 if (CONST_INT_P (op0
) && CONST_INT_P (op1
)
3688 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3690 /* Get the integer argument values in two forms:
3691 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3693 arg0
= INTVAL (op0
);
3694 arg1
= INTVAL (op1
);
3696 if (width
< HOST_BITS_PER_WIDE_INT
)
3698 arg0
&= GET_MODE_MASK (mode
);
3699 arg1
&= GET_MODE_MASK (mode
);
3702 if (val_signbit_known_set_p (mode
, arg0s
))
3703 arg0s
|= ~GET_MODE_MASK (mode
);
3706 if (val_signbit_known_set_p (mode
, arg1s
))
3707 arg1s
|= ~GET_MODE_MASK (mode
);
3715 /* Compute the value of the arithmetic. */
3720 val
= arg0s
+ arg1s
;
3724 val
= arg0s
- arg1s
;
3728 val
= arg0s
* arg1s
;
3733 || ((unsigned HOST_WIDE_INT
) arg0s
3734 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3737 val
= arg0s
/ arg1s
;
3742 || ((unsigned HOST_WIDE_INT
) arg0s
3743 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3746 val
= arg0s
% arg1s
;
3751 || ((unsigned HOST_WIDE_INT
) arg0s
3752 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3755 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3760 || ((unsigned HOST_WIDE_INT
) arg0s
3761 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3764 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3782 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3783 the value is in range. We can't return any old value for
3784 out-of-range arguments because either the middle-end (via
3785 shift_truncation_mask) or the back-end might be relying on
3786 target-specific knowledge. Nor can we rely on
3787 shift_truncation_mask, since the shift might not be part of an
3788 ashlM3, lshrM3 or ashrM3 instruction. */
3789 if (SHIFT_COUNT_TRUNCATED
)
3790 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3791 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3794 val
= (code
== ASHIFT
3795 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3796 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3798 /* Sign-extend the result for arithmetic right shifts. */
3799 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3800 val
|= ((unsigned HOST_WIDE_INT
) (-1)) << (width
- arg1
);
3808 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3809 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3817 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3818 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3822 /* Do nothing here. */
3826 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3830 val
= ((unsigned HOST_WIDE_INT
) arg0
3831 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3835 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3839 val
= ((unsigned HOST_WIDE_INT
) arg0
3840 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3853 /* ??? There are simplifications that can be done. */
3860 return gen_int_mode (val
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
3899 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3902 struct simplify_plus_minus_op_data ops
[8];
3904 int n_ops
= 2, input_ops
= 2;
3905 int changed
, n_constants
= 0, canonicalized
= 0;
3908 memset (ops
, 0, sizeof ops
);
3910 /* Set up the two operands and then expand them until nothing has been
3911 changed. If we run out of room in our array, give up; this should
3912 almost never happen. */
3917 ops
[1].neg
= (code
== MINUS
);
3923 for (i
= 0; i
< n_ops
; i
++)
3925 rtx this_op
= ops
[i
].op
;
3926 int this_neg
= ops
[i
].neg
;
3927 enum rtx_code this_code
= GET_CODE (this_op
);
3936 ops
[n_ops
].op
= XEXP (this_op
, 1);
3937 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3940 ops
[i
].op
= XEXP (this_op
, 0);
3943 canonicalized
|= this_neg
;
3947 ops
[i
].op
= XEXP (this_op
, 0);
3948 ops
[i
].neg
= ! this_neg
;
3955 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3956 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3957 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3959 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3960 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3961 ops
[n_ops
].neg
= this_neg
;
3969 /* ~a -> (-a - 1) */
3972 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
3973 ops
[n_ops
++].neg
= this_neg
;
3974 ops
[i
].op
= XEXP (this_op
, 0);
3975 ops
[i
].neg
= !this_neg
;
3985 ops
[i
].op
= neg_const_int (mode
, this_op
);
3999 if (n_constants
> 1)
4002 gcc_assert (n_ops
>= 2);
4004 /* If we only have two operands, we can avoid the loops. */
4007 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4010 /* Get the two operands. Be careful with the order, especially for
4011 the cases where code == MINUS. */
4012 if (ops
[0].neg
&& ops
[1].neg
)
4014 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4017 else if (ops
[0].neg
)
4028 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4031 /* Now simplify each pair of operands until nothing changes. */
4034 /* Insertion sort is good enough for an eight-element array. */
4035 for (i
= 1; i
< n_ops
; i
++)
4037 struct simplify_plus_minus_op_data save
;
4039 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
4045 ops
[j
+ 1] = ops
[j
];
4046 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
4051 for (i
= n_ops
- 1; i
> 0; i
--)
4052 for (j
= i
- 1; j
>= 0; j
--)
4054 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4055 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4057 if (lhs
!= 0 && rhs
!= 0)
4059 enum rtx_code ncode
= PLUS
;
4065 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4067 else if (swap_commutative_operands_p (lhs
, rhs
))
4068 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4070 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4071 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4073 rtx tem_lhs
, tem_rhs
;
4075 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4076 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4077 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
4079 if (tem
&& !CONSTANT_P (tem
))
4080 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4083 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4085 /* Reject "simplifications" that just wrap the two
4086 arguments in a CONST. Failure to do so can result
4087 in infinite recursion with simplify_binary_operation
4088 when it calls us to simplify CONST operations. */
4090 && ! (GET_CODE (tem
) == CONST
4091 && GET_CODE (XEXP (tem
, 0)) == ncode
4092 && XEXP (XEXP (tem
, 0), 0) == lhs
4093 && XEXP (XEXP (tem
, 0), 1) == rhs
))
4096 if (GET_CODE (tem
) == NEG
)
4097 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4098 if (CONST_INT_P (tem
) && lneg
)
4099 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4103 ops
[j
].op
= NULL_RTX
;
4110 /* If nothing changed, fail. */
4114 /* Pack all the operands to the lower-numbered entries. */
4115 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4125 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4127 && CONST_INT_P (ops
[1].op
)
4128 && CONSTANT_P (ops
[0].op
)
4130 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4132 /* We suppressed creation of trivial CONST expressions in the
4133 combination loop to avoid recursion. Create one manually now.
4134 The combination loop should have ensured that there is exactly
4135 one CONST_INT, and the sort will have ensured that it is last
4136 in the array and that any other constant will be next-to-last. */
4139 && CONST_INT_P (ops
[n_ops
- 1].op
)
4140 && CONSTANT_P (ops
[n_ops
- 2].op
))
4142 rtx value
= ops
[n_ops
- 1].op
;
4143 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4144 value
= neg_const_int (mode
, value
);
4145 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4150 /* Put a non-negated operand first, if possible. */
4152 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4155 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4164 /* Now make the result by performing the requested operations. */
4166 for (i
= 1; i
< n_ops
; i
++)
4167 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4168 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4207 if (SCALAR_FLOAT_MODE_P (mode
))
4209 if (tem
== const0_rtx
)
4210 return CONST0_RTX (mode
);
4211 #ifdef FLOAT_STORE_FLAG_VALUE
4213 REAL_VALUE_TYPE val
;
4214 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4215 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
4221 if (VECTOR_MODE_P (mode
))
4223 if (tem
== const0_rtx
)
4224 return CONST0_RTX (mode
);
4225 #ifdef VECTOR_STORE_FLAG_VALUE
4230 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4231 if (val
== NULL_RTX
)
4233 if (val
== const1_rtx
)
4234 return CONST1_RTX (mode
);
4236 units
= GET_MODE_NUNITS (mode
);
4237 v
= rtvec_alloc (units
);
4238 for (i
= 0; i
< units
; i
++)
4239 RTVEC_ELT (v
, i
) = val
;
4240 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
4250 /* For the following tests, ensure const0_rtx is op1. */
4251 if (swap_commutative_operands_p (op0
, op1
)
4252 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4253 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
4255 /* If op0 is a compare, extract the comparison arguments from it. */
4256 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4257 return simplify_gen_relational (code
, mode
, VOIDmode
,
4258 XEXP (op0
, 0), XEXP (op0
, 1));
4260 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4264 trueop0
= avoid_constant_pool_reference (op0
);
4265 trueop1
= avoid_constant_pool_reference (op1
);
4266 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
4270 /* This part of simplify_relational_operation is only used when CMP_MODE
4271 is not in class MODE_CC (i.e. it is a real comparison).
4273 MODE is the mode of the result, while CMP_MODE specifies in which
4274 mode the comparison is done in, so it is the mode of the operands. */
4277 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
4278 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
4280 enum rtx_code op0code
= GET_CODE (op0
);
4282 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4284 /* If op0 is a comparison, extract the comparison arguments
4288 if (GET_MODE (op0
) == mode
)
4289 return simplify_rtx (op0
);
4291 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4292 XEXP (op0
, 0), XEXP (op0
, 1));
4294 else if (code
== EQ
)
4296 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
4297 if (new_code
!= UNKNOWN
)
4298 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4299 XEXP (op0
, 0), XEXP (op0
, 1));
4303 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4304 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4305 if ((code
== LTU
|| code
== GEU
)
4306 && GET_CODE (op0
) == PLUS
4307 && CONST_INT_P (XEXP (op0
, 1))
4308 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4309 || rtx_equal_p (op1
, XEXP (op0
, 1))))
4312 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4313 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4314 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4317 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4318 if ((code
== LTU
|| code
== GEU
)
4319 && GET_CODE (op0
) == PLUS
4320 && rtx_equal_p (op1
, XEXP (op0
, 1))
4321 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4322 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4323 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4324 copy_rtx (XEXP (op0
, 0)));
4326 if (op1
== const0_rtx
)
4328 /* Canonicalize (GTU x 0) as (NE x 0). */
4330 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4331 /* Canonicalize (LEU x 0) as (EQ x 0). */
4333 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4335 else if (op1
== const1_rtx
)
4340 /* Canonicalize (GE x 1) as (GT x 0). */
4341 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4344 /* Canonicalize (GEU x 1) as (NE x 0). */
4345 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4348 /* Canonicalize (LT x 1) as (LE x 0). */
4349 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4352 /* Canonicalize (LTU x 1) as (EQ x 0). */
4353 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4359 else if (op1
== constm1_rtx
)
4361 /* Canonicalize (LE x -1) as (LT x 0). */
4363 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4364 /* Canonicalize (GT x -1) as (GE x 0). */
4366 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4369 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4370 if ((code
== EQ
|| code
== NE
)
4371 && (op0code
== PLUS
|| op0code
== MINUS
)
4373 && CONSTANT_P (XEXP (op0
, 1))
4374 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4376 rtx x
= XEXP (op0
, 0);
4377 rtx c
= XEXP (op0
, 1);
4378 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4379 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
4381 /* Detect an infinite recursive condition, where we oscillate at this
4382 simplification case between:
4383 A + B == C <---> C - B == A,
4384 where A, B, and C are all constants with non-simplifiable expressions,
4385 usually SYMBOL_REFs. */
4386 if (GET_CODE (tem
) == invcode
4388 && rtx_equal_p (c
, XEXP (tem
, 1)))
4391 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
4394 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4395 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4397 && op1
== const0_rtx
4398 && GET_MODE_CLASS (mode
) == MODE_INT
4399 && cmp_mode
!= VOIDmode
4400 /* ??? Work-around BImode bugs in the ia64 backend. */
4402 && cmp_mode
!= BImode
4403 && nonzero_bits (op0
, cmp_mode
) == 1
4404 && STORE_FLAG_VALUE
== 1)
4405 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
4406 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
4407 : lowpart_subreg (mode
, op0
, cmp_mode
);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), const0_rtx);
  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1)
          || GET_CODE (op1) == CONST_DOUBLE)
      && (CONST_INT_P (XEXP (op0, 1))
          || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));
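  /* Worked example (added for exposition; not part of the original
     sources): for CONST_INT operands the rule above folds the XOR at
     compile time, e.g. (eq (xor x (const_int 5)) (const_int 9)) becomes
     (eq x (const_int 12)) because 5 ^ 9 == 12.  */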
  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
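/* Illustrative sketch (added for exposition; not part of the original
   sources): assuming the CMP_* flags carry exactly the meanings documented
   above, a signed less-than-or-equal query could be answered from
   KNOWN_RESULTS as below; comparison_result itself covers every rtx
   comparison code in one switch.  */

static rtx
example_known_le (int known_results)
{
  /* LE holds when the operands compared equal or signed LT holds.  */
  return (known_results & (CMP_EQ | CMP_LT)) ? const_true_rtx : const0_rtx;
}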
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));
  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);
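  /* Worked example (added for exposition; not part of the original
     sources): for (ne (plus r (const_int 1)) (plus r (const_int 1))) the
     MINUS above folds to (const_int 0), so the recursive call compares
     zero with zero and the whole expression evaluates to const0_rtx.  */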
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || CONST_INT_P (trueop0))
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }
      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= GET_MODE_MASK (mode);
          l1u &= GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l0s))
            l0s |= ~GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l1s))
            l1s |= ~GET_MODE_MASK (mode);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
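  /* Worked example (added for exposition; not part of the original
     sources): if nonzero_bits shows that TRUEOP0 fits in the low seven
     bits of SImode, the reduced range above is mmin = 0, mmax = 127, so
     (gtu x (const_int 200)) folds to const0_rtx and
     (leu x (const_int 200)) folds to const_true_rtx.  */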
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      break;
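      /* Worked example (added for exposition; not part of the original
         sources): (fma (neg a) (neg b) c) has both multiplication operands
         negated, so the code above cancels the two NEGs and returns
         (fma a b c).  A lone negation is only removed when the compensating
         negation of the other operand simplifies, e.g. when that operand
         is a constant.  */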
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
          else
            val >>= op2val;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
                     != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
            }

          return gen_int_mode (val, mode);
        }
      break;
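      /* Worked example (added for exposition; not part of the original
         sources): on a !BITS_BIG_ENDIAN target,
         (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
         shifts 0xab right by four and masks with 0xf, giving
         (const_int 10); SIGN_EXTRACT of the same field sign-extends from
         bit 3 and gives (const_int -6) instead.  */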
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;
5037 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5039 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5040 ? GET_MODE (XEXP (op0
, 1))
5041 : GET_MODE (XEXP (op0
, 0)));
5044 /* Look for happy constants in op1 and op2. */
5045 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5047 HOST_WIDE_INT t
= INTVAL (op1
);
5048 HOST_WIDE_INT f
= INTVAL (op2
);
5050 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5051 code
= GET_CODE (op0
);
5052 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5055 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
5063 return simplify_gen_relational (code
, mode
, cmp_mode
,
5064 XEXP (op0
, 0), XEXP (op0
, 1));
5067 if (cmp_mode
== VOIDmode
)
5068 cmp_mode
= op0_mode
;
5069 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5070 cmp_mode
, XEXP (op0
, 0),
5073 /* See if any simplifications were possible. */
5076 if (CONST_INT_P (temp
))
5077 return temp
== const0_rtx
? op2
: op1
;
5079 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5085 gcc_assert (GET_MODE (op0
) == mode
);
5086 gcc_assert (GET_MODE (op1
) == mode
);
5087 gcc_assert (VECTOR_MODE_P (mode
));
5088 op2
= avoid_constant_pool_reference (op2
);
5089 if (CONST_INT_P (op2
))
5091 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
5092 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
5093 int mask
= (1 << n_elts
) - 1;
5095 if (!(INTVAL (op2
) & mask
))
5097 if ((INTVAL (op2
) & mask
) == mask
)
5100 op0
= avoid_constant_pool_reference (op0
);
5101 op1
= avoid_constant_pool_reference (op1
);
5102 if (GET_CODE (op0
) == CONST_VECTOR
5103 && GET_CODE (op1
) == CONST_VECTOR
)
5105 rtvec v
= rtvec_alloc (n_elts
);
5108 for (i
= 0; i
< n_elts
; i
++)
5109 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
5110 ? CONST_VECTOR_ELT (op0
, i
)
5111 : CONST_VECTOR_ELT (op1
, i
));
5112 return gen_rtx_CONST_VECTOR (mode
, v
);
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
5136 /* We support up to 512-bit values (for V8DFmode). */
5140 value_mask
= (1 << value_bit
) - 1
5142 unsigned char value
[max_bitsize
/ value_bit
];
5151 rtvec result_v
= NULL
;
5152 enum mode_class outer_class
;
5153 enum machine_mode outer_submode
;
5155 /* Some ports misuse CCmode. */
5156 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
5159 /* We have no way to represent a complex constant at the rtl level. */
5160 if (COMPLEX_MODE_P (outermode
))
5163 /* Unpack the value. */
5165 if (GET_CODE (op
) == CONST_VECTOR
)
5167 num_elem
= CONST_VECTOR_NUNITS (op
);
5168 elems
= &CONST_VECTOR_ELT (op
, 0);
5169 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
5175 elem_bitsize
= max_bitsize
;
5177 /* If this asserts, it is too complicated; reducing value_bit may help. */
5178 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
5179 /* I don't know how to handle endianness of sub-units. */
5180 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
5182 for (elem
= 0; elem
< num_elem
; elem
++)
5185 rtx el
= elems
[elem
];
5187 /* Vectors are kept in target memory order. (This is probably
5190 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5191 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5193 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5194 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5195 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5196 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5197 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5200 switch (GET_CODE (el
))
5204 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5206 *vp
++ = INTVAL (el
) >> i
;
5207 /* CONST_INTs are always logically sign-extended. */
5208 for (; i
< elem_bitsize
; i
+= value_bit
)
5209 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
5213 if (GET_MODE (el
) == VOIDmode
)
5215 unsigned char extend
= 0;
5216 /* If this triggers, someone should have generated a
5217 CONST_INT instead. */
5218 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
5220 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5221 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
5222 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
5225 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
5229 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
5231 for (; i
< elem_bitsize
; i
+= value_bit
)
5236 long tmp
[max_bitsize
/ 32];
5237 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
5239 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
5240 gcc_assert (bitsize
<= elem_bitsize
);
5241 gcc_assert (bitsize
% value_bit
== 0);
5243 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
5246 /* real_to_target produces its result in words affected by
5247 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5248 and use WORDS_BIG_ENDIAN instead; see the documentation
5249 of SUBREG in rtl.texi. */
5250 for (i
= 0; i
< bitsize
; i
+= value_bit
)
5253 if (WORDS_BIG_ENDIAN
)
5254 ibase
= bitsize
- 1 - i
;
5257 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
5260 /* It shouldn't matter what's done here, so fill it with
5262 for (; i
< elem_bitsize
; i
+= value_bit
)
5268 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5270 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5271 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5275 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5276 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5277 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
5279 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
5280 >> (i
- HOST_BITS_PER_WIDE_INT
);
5281 for (; i
< elem_bitsize
; i
+= value_bit
)
5291 /* Now, pick the right byte to start with. */
5292 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5293 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5294 will already have offset 0. */
5295 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
5297 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
5299 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5300 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5301 byte
= (subword_byte
% UNITS_PER_WORD
5302 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5305 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5306 so if it's become negative it will instead be very large.) */
5307 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5309 /* Convert from bytes to chunks of size value_bit. */
5310 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
5312 /* Re-pack the value. */
5314 if (VECTOR_MODE_P (outermode
))
5316 num_elem
= GET_MODE_NUNITS (outermode
);
5317 result_v
= rtvec_alloc (num_elem
);
5318 elems
= &RTVEC_ELT (result_v
, 0);
5319 outer_submode
= GET_MODE_INNER (outermode
);
5325 outer_submode
= outermode
;
5328 outer_class
= GET_MODE_CLASS (outer_submode
);
5329 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
5331 gcc_assert (elem_bitsize
% value_bit
== 0);
5332 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
5334 for (elem
= 0; elem
< num_elem
; elem
++)
5338 /* Vectors are stored in target memory order. (This is probably
5341 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5342 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5344 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5345 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5346 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5347 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5348 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5351 switch (outer_class
)
5354 case MODE_PARTIAL_INT
:
5356 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
5359 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5361 lo
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5362 for (; i
< elem_bitsize
; i
+= value_bit
)
5363 hi
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
5364 << (i
- HOST_BITS_PER_WIDE_INT
);
5366 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5368 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5369 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
5370 else if (elem_bitsize
<= HOST_BITS_PER_DOUBLE_INT
)
5371 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
5378 case MODE_DECIMAL_FLOAT
:
5381 long tmp
[max_bitsize
/ 32];
5383 /* real_from_target wants its input in words affected by
5384 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5385 and use WORDS_BIG_ENDIAN instead; see the documentation
5386 of SUBREG in rtl.texi. */
5387 for (i
= 0; i
< max_bitsize
/ 32; i
++)
5389 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5392 if (WORDS_BIG_ENDIAN
)
5393 ibase
= elem_bitsize
- 1 - i
;
5396 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
5399 real_from_target (&r
, tmp
, outer_submode
);
5400 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
5412 f
.mode
= outer_submode
;
5415 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5417 f
.data
.low
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5418 for (; i
< elem_bitsize
; i
+= value_bit
)
5419 f
.data
.high
|= ((unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
5420 << (i
- HOST_BITS_PER_WIDE_INT
));
5422 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
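/* Illustrative sketch (added for exposition; not part of the original
   sources): the unpack step documented above, specialised to a single
   CONST_INT and whole 8-bit value chunks.  This hypothetical helper only
   shows the byte splitting; the real code also handles CONST_DOUBLE,
   CONST_FIXED and CONST_VECTOR and respects the target's byte and word
   ordering.  */

static void
example_unpack_const_int (rtx el, unsigned char *buf, unsigned int nbytes)
{
  unsigned int i;

  for (i = 0; i < nbytes; i++)
    /* CONST_INTs are logically sign-extended, so bytes beyond
       HOST_BITS_PER_WIDE_INT replicate the sign.  */
    buf[i] = (i * 8 < (unsigned int) HOST_BITS_PER_WIDE_INT
              ? (INTVAL (el) >> (i * 8)) & 0xff
              : (INTVAL (el) < 0 ? 0xff : 0));
}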
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
5463 /* Changing mode twice with SUBREG => just change it once,
5464 or not at all if changing back op starting mode. */
5465 if (GET_CODE (op
) == SUBREG
)
5467 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
5468 int final_offset
= byte
+ SUBREG_BYTE (op
);
5471 if (outermode
== innermostmode
5472 && byte
== 0 && SUBREG_BYTE (op
) == 0)
5473 return SUBREG_REG (op
);
5475 /* The SUBREG_BYTE represents offset, as if the value were stored
5476 in memory. Irritating exception is paradoxical subreg, where
5477 we define SUBREG_BYTE to be 0. On big endian machines, this
5478 value should be negative. For a moment, undo this exception. */
5479 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5481 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
5482 if (WORDS_BIG_ENDIAN
)
5483 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5484 if (BYTES_BIG_ENDIAN
)
5485 final_offset
+= difference
% UNITS_PER_WORD
;
5487 if (SUBREG_BYTE (op
) == 0
5488 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
5490 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
5491 if (WORDS_BIG_ENDIAN
)
5492 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5493 if (BYTES_BIG_ENDIAN
)
5494 final_offset
+= difference
% UNITS_PER_WORD
;
5497 /* See whether resulting subreg will be paradoxical. */
5498 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
5500 /* In nonparadoxical subregs we can't handle negative offsets. */
5501 if (final_offset
< 0)
5503 /* Bail out in case resulting subreg would be incorrect. */
5504 if (final_offset
% GET_MODE_SIZE (outermode
)
5505 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
5511 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
5513 /* In paradoxical subreg, see if we are still looking on lower part.
5514 If so, our SUBREG_BYTE will be 0. */
5515 if (WORDS_BIG_ENDIAN
)
5516 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5517 if (BYTES_BIG_ENDIAN
)
5518 offset
+= difference
% UNITS_PER_WORD
;
5519 if (offset
== final_offset
)
5525 /* Recurse for further possible simplifications. */
5526 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
5530 if (validate_subreg (outermode
, innermostmode
,
5531 SUBREG_REG (op
), final_offset
))
5533 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
5534 if (SUBREG_PROMOTED_VAR_P (op
)
5535 && SUBREG_PROMOTED_UNSIGNED_P (op
) >= 0
5536 && GET_MODE_CLASS (outermode
) == MODE_INT
5537 && IN_RANGE (GET_MODE_SIZE (outermode
),
5538 GET_MODE_SIZE (innermode
),
5539 GET_MODE_SIZE (innermostmode
))
5540 && subreg_lowpart_p (newx
))
5542 SUBREG_PROMOTED_VAR_P (newx
) = 1;
5543 SUBREG_PROMOTED_UNSIGNED_SET
5544 (newx
, SUBREG_PROMOTED_UNSIGNED_P (op
));
5551 /* Merge implicit and explicit truncations. */
5553 if (GET_CODE (op
) == TRUNCATE
5554 && GET_MODE_SIZE (outermode
) < GET_MODE_SIZE (innermode
)
5555 && subreg_lowpart_offset (outermode
, innermode
) == byte
)
5556 return simplify_gen_unary (TRUNCATE
, outermode
, XEXP (op
, 0),
5557 GET_MODE (XEXP (op
, 0)));
5559 /* SUBREG of a hard register => just change the register number
5560 and/or mode. If the hard register is not valid in that mode,
5561 suppress this simplification. If the hard register is the stack,
5562 frame, or argument pointer, leave this as a SUBREG. */
5564 if (REG_P (op
) && HARD_REGISTER_P (op
))
5566 unsigned int regno
, final_regno
;
5569 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
5570 if (HARD_REGISTER_NUM_P (final_regno
))
5573 int final_offset
= byte
;
5575 /* Adjust offset for paradoxical subregs. */
5577 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5579 int difference
= (GET_MODE_SIZE (innermode
)
5580 - GET_MODE_SIZE (outermode
));
5581 if (WORDS_BIG_ENDIAN
)
5582 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5583 if (BYTES_BIG_ENDIAN
)
5584 final_offset
+= difference
% UNITS_PER_WORD
;
5587 x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, final_offset
);
5589 /* Propagate original regno. We don't have any way to specify
5590 the offset inside original regno, so do so only for lowpart.
5591 The information is used only by alias analysis that can not
5592 grog partial register anyway. */
5594 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
5595 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
5600 /* If we have a SUBREG of a register that we are replacing and we are
5601 replacing it with a MEM, make a new MEM and try replacing the
5602 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5603 or if we would be widening it. */
5606 && ! mode_dependent_address_p (XEXP (op
, 0))
5607 /* Allow splitting of volatile memory references in case we don't
5608 have instruction to move the whole thing. */
5609 && (! MEM_VOLATILE_P (op
)
5610 || ! have_insn_for (SET
, innermode
))
5611 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
5612 return adjust_address_nv (op
, outermode
, byte
);
5614 /* Handle complex values represented as CONCAT
5615 of real and imaginary part. */
5616 if (GET_CODE (op
) == CONCAT
)
5618 unsigned int part_size
, final_offset
;
5621 part_size
= GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)));
5622 if (byte
< part_size
)
5624 part
= XEXP (op
, 0);
5625 final_offset
= byte
;
5629 part
= XEXP (op
, 1);
5630 final_offset
= byte
- part_size
;
5633 if (final_offset
+ GET_MODE_SIZE (outermode
) > part_size
)
5636 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
5639 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
5640 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
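  /* Worked example (added for exposition; not part of the original
     sources): a lowpart (subreg:QI (zero_extend:SI (reg:QI r)) ...) has
     bitpos == 0 and OUTERMODE equal to the extension's source mode, so
     the first case above returns (reg:QI r) directly; a subreg whose
     bits lie entirely above the zero_extend's source, so that bitpos is
     at least the source precision, folds to (const_int 0).  */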
5680 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5681 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5682 the outer subreg is effectively a truncation to the original mode. */
5683 if ((GET_CODE (op
) == LSHIFTRT
5684 || GET_CODE (op
) == ASHIFTRT
)
5685 && SCALAR_INT_MODE_P (outermode
)
5686 && SCALAR_INT_MODE_P (innermode
)
5687 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5688 to avoid the possibility that an outer LSHIFTRT shifts by more
5689 than the sign extension's sign_bit_copies and introduces zeros
5690 into the high bits of the result. */
5691 && (2 * GET_MODE_PRECISION (outermode
)) <= GET_MODE_PRECISION (innermode
)
5692 && CONST_INT_P (XEXP (op
, 1))
5693 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
5694 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
5695 && INTVAL (XEXP (op
, 1)) < GET_MODE_PRECISION (outermode
)
5696 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
5697 return simplify_gen_binary (ASHIFTRT
, outermode
,
5698 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
5700 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5701 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5702 the outer subreg is effectively a truncation to the original mode. */
5703 if ((GET_CODE (op
) == LSHIFTRT
5704 || GET_CODE (op
) == ASHIFTRT
)
5705 && SCALAR_INT_MODE_P (outermode
)
5706 && SCALAR_INT_MODE_P (innermode
)
5707 && GET_MODE_PRECISION (outermode
) < GET_MODE_PRECISION (innermode
)
5708 && CONST_INT_P (XEXP (op
, 1))
5709 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
5710 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
5711 && INTVAL (XEXP (op
, 1)) < GET_MODE_PRECISION (outermode
)
5712 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
5713 return simplify_gen_binary (LSHIFTRT
, outermode
,
5714 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
5716 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5717 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5718 the outer subreg is effectively a truncation to the original mode. */
5719 if (GET_CODE (op
) == ASHIFT
5720 && SCALAR_INT_MODE_P (outermode
)
5721 && SCALAR_INT_MODE_P (innermode
)
5722 && GET_MODE_PRECISION (outermode
) < GET_MODE_PRECISION (innermode
)
5723 && CONST_INT_P (XEXP (op
, 1))
5724 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
5725 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
5726 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
5727 && INTVAL (XEXP (op
, 1)) < GET_MODE_PRECISION (outermode
)
5728 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
5729 return simplify_gen_binary (ASHIFT
, outermode
,
5730 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
5732 /* Recognize a word extraction from a multi-word subreg. */
5733 if ((GET_CODE (op
) == LSHIFTRT
5734 || GET_CODE (op
) == ASHIFTRT
)
5735 && SCALAR_INT_MODE_P (innermode
)
5736 && GET_MODE_PRECISION (outermode
) >= BITS_PER_WORD
5737 && GET_MODE_PRECISION (innermode
) >= (2 * GET_MODE_PRECISION (outermode
))
5738 && CONST_INT_P (XEXP (op
, 1))
5739 && (INTVAL (XEXP (op
, 1)) & (GET_MODE_PRECISION (outermode
) - 1)) == 0
5740 && INTVAL (XEXP (op
, 1)) >= 0
5741 && INTVAL (XEXP (op
, 1)) < GET_MODE_PRECISION (innermode
)
5742 && byte
== subreg_lowpart_offset (outermode
, innermode
))
5744 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
5745 return simplify_gen_subreg (outermode
, XEXP (op
, 0), innermode
,
5747 ? byte
- shifted_bytes
5748 : byte
+ shifted_bytes
));
5751 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5752 and try replacing the SUBREG and shift with it. Don't do this if
5753 the MEM has a mode-dependent address or if we would be widening it. */
5755 if ((GET_CODE (op
) == LSHIFTRT
5756 || GET_CODE (op
) == ASHIFTRT
)
5757 && SCALAR_INT_MODE_P (innermode
)
5758 && MEM_P (XEXP (op
, 0))
5759 && CONST_INT_P (XEXP (op
, 1))
5760 && GET_MODE_SIZE (outermode
) < GET_MODE_SIZE (GET_MODE (op
))
5761 && (INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (outermode
)) == 0
5762 && INTVAL (XEXP (op
, 1)) > 0
5763 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (innermode
)
5764 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0))
5765 && ! MEM_VOLATILE_P (XEXP (op
, 0))
5766 && byte
== subreg_lowpart_offset (outermode
, innermode
)
5767 && (GET_MODE_SIZE (outermode
) >= UNITS_PER_WORD
5768 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
5770 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
5771 return adjust_address_nv (XEXP (op
, 0), outermode
,
5773 ? byte
- shifted_bytes
5774 : byte
+ shifted_bytes
));
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))