/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
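/* For example, HWI_SIGN_EXTEND of a LOW value with its sign bit set is
   (HOST_WIDE_INT) -1, and of a non-negative LOW value it is 0 -- i.e. the
   high word that sign-extending LOW would produce.  */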
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
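/* Example: for SImode this accepts exactly the constant whose 32-bit
   value is 0x80000000, i.e. only the most significant bit of the mode
   is set.  */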
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
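/* For example, a commutative (plus (const_int 4) (reg)) that does not fold
   is returned in canonical order as (plus (reg) (const_int 4)).  */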
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
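/* For instance, replacing (reg A) with (const_int 8) in
   (plus (reg A) (const_int 4)) via simplify_replace_rtx folds the result
   all the way to (const_int 12).  */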
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
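      /* De Morgan example: (not (and X Y)) becomes (ior (not X) (not Y)),
         and (not (ior X Y)) becomes (and (not X) (not Y)).  */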
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (mode, XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
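      /* E.g. with STORE_FLAG_VALUE == 1, (neg (lt X 0)) becomes
         (ashiftrt X (width-1)): the arithmetic shift is -1 exactly when
         X is negative and 0 otherwise, which is minus the 0/1 flag.  */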
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
                                - GET_MODE_PRECISION (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
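      /* For instance, truncating a DImode value that has at least 33
         sign-bit copies down to SImode loses no information, so the
         TRUNCATE can be expressed as a lowpart SUBREG.  */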
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                                  0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematiclly
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                         ? SIGN_EXTEND : ZERO_EXTEND,
                                         mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematiclly
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT
           && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
          || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
        /* We should never get a negative number.  */
        gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          arg0 &= GET_MODE_MASK (mode);
          val = ffs_hwi (arg0);
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
          break;

        case CLRSB:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            val = GET_MODE_PRECISION (mode) - 1;
          else if (arg0 >= 0)
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
          else if (arg0 < 0)
            val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_PRECISION (mode);
            }
          else
            val = ctz_hwi (arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & GET_MODE_MASK (op_mode);
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          op_width = GET_MODE_PRECISION (op_mode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (op_width < HOST_BITS_PER_WIDE_INT)
            {
              val = arg0 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, val))
                val |= ~GET_MODE_MASK (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
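  /* For example, (popcount:SI (const_int 7)) folds to (const_int 3) and
     (bswap:SI (const_int 0x12345678)) folds to (const_int 0x78563412).  */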
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (CONST_DOUBLE_AS_INT_P (op))
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 != 0)
            lv = ffs_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
          else
            lv = 0;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_PRECISION (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = ctz_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_PRECISION (mode);
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (op_width > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || op_width > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, lv))
                lv |= ~GET_MODE_MASK (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (unsigned HOST_WIDE_INT) (-1)
                   << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == HOST_BITS_PER_DOUBLE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
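/* As an example of the above, (and (and (reg X) (const_int 12))
   (const_int 10)) is reassociated so that the two constants fold,
   giving (and (reg X) (const_int 8)).  */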
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, coeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          coeff1 = double_int_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = double_int_minus_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, coeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }
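      /* E.g. (plus (mult (reg X) (const_int 3)) (reg X)) becomes
         (mult (reg X) (const_int 4)) here, provided the new form is no
         more expensive than the original.  */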
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
2111 /* We can't assume x-x is 0 even with non-IEEE floating point,
2112 but since it is zero except in very strange circumstances, we
2113 will treat it as zero with -ffinite-math-only. */
2114 if (rtx_equal_p (trueop0
, trueop1
)
2115 && ! side_effects_p (op0
)
2116 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2117 return CONST0_RTX (mode
);
2119 /* Change subtraction from zero into negation. (0 - x) is the
2120 same as -x when x is NaN, infinite, or finite and nonzero.
2121 But if the mode has signed zeros, and does not round towards
2122 -infinity, then 0 - 0 is 0, not -0. */
2123 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2124 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2126 /* (-1 - a) is ~a. */
2127 if (trueop0
== constm1_rtx
)
2128 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2130 /* Subtracting 0 has no effect unless the mode has signed zeros
2131 and supports rounding towards -infinity. In such a case,
2133 if (!(HONOR_SIGNED_ZEROS (mode
)
2134 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2135 && trueop1
== CONST0_RTX (mode
))
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, negcoeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          negcoeff1 = double_int_minus_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = double_int_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1 = double_int_setbit (double_int_zero,
                                             INTVAL (XEXP (rhs, 1)));
              negcoeff1 = double_int_neg (negcoeff1);
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, negcoeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : orig;
            }
        }
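      /* Illustrative note (added commentary, not upstream code): with
         op0 = (mult x 3) and op1 = x, the code above ends up with
         lhs == rhs == x, coeff0 == 3 and negcoeff1 == -1, so it proposes
         x * 2 (possibly further folded to a shift) and keeps it only if
         its cost is no worse than the original (minus (mult x 3) x).  */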
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }
      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));
      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
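      /* Illustrative note (added commentary, not upstream code):
         x - (x & y) == x & ~y holds bitwise because x & y keeps exactly
         those bits of x that are also in y, and subtracting a value whose
         set bits are a subset of x's set bits never borrows, so it simply
         clears them.  E.g. x = 0b1101, y = 0b1011:
         x - (x & y) = 0b1101 - 0b1001 = 0b0100 = x & ~y.  */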
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;
      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }
      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }
      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      break;
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);
      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could, through simplify_associative_operation, move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could, through simplify_associative_operation, move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;
      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (CONST_DOUBLE_AS_INT_P (trueop1)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
          && (val < HOST_BITS_PER_DOUBLE_INT - 1
              || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
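      /* Illustrative note (added commentary, not upstream code): e.g. an
         SImode (mult x (const_int 8)) becomes (ashift x (const_int 3)).
         The extra width check only matters for modes wider than the host
         word, where a constant with just the host sign bit set is really a
         sign-extended negative value rather than a power of two.  */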
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }
      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
2421 if (trueop1
== CONST0_RTX (mode
))
2423 if (INTEGRAL_MODE_P (mode
)
2424 && trueop1
== CONSTM1_RTX (mode
)
2425 && !side_effects_p (op0
))
2427 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2429 /* A | (~A) -> -1 */
2430 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2431 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2432 && ! side_effects_p (op0
)
2433 && SCALAR_INT_MODE_P (mode
))
      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
          && !side_effects_p (op0))
        return op1;
      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2: every bit the AND
             can contribute is already set in C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }
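      /* Illustrative note (added commentary, not upstream code): with
         C1 == 0x0f and C2 == 0x03, C1 is not minimal, so the last rule
         rewrites (X & 0x0f) | 0x03 as (X & 0x0c) | 0x03; with C1 == 0x03
         and C2 == 0x0f the whole expression is just 0x0f.  */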
2470 /* Convert (A & B) | A to A. */
2471 if (GET_CODE (op0
) == AND
2472 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2473 || rtx_equal_p (XEXP (op0
, 1), op1
))
2474 && ! side_effects_p (XEXP (op0
, 0))
2475 && ! side_effects_p (XEXP (op0
, 1)))
2478 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2479 mode size to (rotate A CX). */
2481 if (GET_CODE (op1
) == ASHIFT
2482 || GET_CODE (op1
) == SUBREG
)
2493 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2494 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2495 && CONST_INT_P (XEXP (opleft
, 1))
2496 && CONST_INT_P (XEXP (opright
, 1))
2497 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2498 == GET_MODE_PRECISION (mode
)))
2499 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2501 /* Same, but for ashift that has been "simplified" to a wider mode
2502 by simplify_shift_const. */
2504 if (GET_CODE (opleft
) == SUBREG
2505 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2506 && GET_CODE (opright
) == LSHIFTRT
2507 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2508 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2509 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2510 && (GET_MODE_SIZE (GET_MODE (opleft
))
2511 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2512 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2513 SUBREG_REG (XEXP (opright
, 0)))
2514 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2515 && CONST_INT_P (XEXP (opright
, 1))
2516 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2517 == GET_MODE_PRECISION (mode
)))
2518 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2519 XEXP (SUBREG_REG (opleft
), 1));
2521 /* If we have (ior (and (X C1) C2)), simplify this by making
2522 C1 as small as possible if C1 actually changes. */
2523 if (CONST_INT_P (op1
)
2524 && (HWI_COMPUTABLE_MODE_P (mode
)
2525 || INTVAL (op1
) > 0)
2526 && GET_CODE (op0
) == AND
2527 && CONST_INT_P (XEXP (op0
, 1))
2528 && CONST_INT_P (op1
)
2529 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2530 return simplify_gen_binary (IOR
, mode
,
2532 (AND
, mode
, XEXP (op0
, 0),
2533 GEN_INT (UINTVAL (XEXP (op0
, 1))
2537 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2538 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2539 the PLUS does not affect any of the bits in OP1: then we can do
2540 the IOR as a PLUS and we can associate. This is valid if OP1
2541 can be safely shifted left C bits. */
2542 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2543 && GET_CODE (XEXP (op0
, 0)) == PLUS
2544 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2545 && CONST_INT_P (XEXP (op0
, 1))
2546 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2548 int count
= INTVAL (XEXP (op0
, 1));
2549 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2551 if (mask
>> count
== INTVAL (trueop1
)
2552 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2553 return simplify_gen_binary (ASHIFTRT
, mode
,
2554 plus_constant (mode
, XEXP (op0
, 0),
2559 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2565 if (trueop1
== CONST0_RTX (mode
))
2567 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2568 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2569 if (rtx_equal_p (trueop0
, trueop1
)
2570 && ! side_effects_p (op0
)
2571 && GET_MODE_CLASS (mode
) != MODE_CC
)
2572 return CONST0_RTX (mode
);
      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
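      /* Illustrative note (added commentary, not upstream code): flipping
         the sign bit is the same as adding it modulo 2^precision, because
         the carry out of the top bit is discarded; e.g. in QImode,
         x ^ 0x80 == x + 0x80 for every x, so PLUS serves as the single
         canonical form.  */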
2578 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2579 if ((CONST_INT_P (op1
) || CONST_DOUBLE_AS_INT_P (op1
))
2580 && GET_CODE (op0
) == PLUS
2581 && (CONST_INT_P (XEXP (op0
, 1))
2582 || CONST_DOUBLE_AS_INT_P (XEXP (op0
, 1)))
2583 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2584 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2585 simplify_gen_binary (XOR
, mode
, op1
,
      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
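      /* Illustrative note (added commentary, not upstream code): when the
         nonzero-bit masks are disjoint no bit position can ever see two 1s,
         so XOR and IOR compute the same value; e.g. an SImode
         (xor (ashift x 8) (zero_extend y)) with y a QImode value can be
         treated as an IOR, which the rotate detection in the IOR case
         above looks for.  */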
2597 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2598 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2601 int num_negated
= 0;
2603 if (GET_CODE (op0
) == NOT
)
2604 num_negated
++, op0
= XEXP (op0
, 0);
2605 if (GET_CODE (op1
) == NOT
)
2606 num_negated
++, op1
= XEXP (op1
, 0);
2608 if (num_negated
== 2)
2609 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2610 else if (num_negated
== 1)
2611 return simplify_gen_unary (NOT
, mode
,
2612 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2616 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2617 correspond to a machine insn or result in further simplifications
2618 if B is a constant. */
2620 if (GET_CODE (op0
) == AND
2621 && rtx_equal_p (XEXP (op0
, 1), op1
)
2622 && ! side_effects_p (op1
))
2623 return simplify_gen_binary (AND
, mode
,
2624 simplify_gen_unary (NOT
, mode
,
2625 XEXP (op0
, 0), mode
),
2628 else if (GET_CODE (op0
) == AND
2629 && rtx_equal_p (XEXP (op0
, 0), op1
)
2630 && ! side_effects_p (op1
))
2631 return simplify_gen_binary (AND
, mode
,
2632 simplify_gen_unary (NOT
, mode
,
2633 XEXP (op0
, 1), mode
),
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
2646 rtx a
= XEXP (op0
, 0);
2647 rtx b
= XEXP (op0
, 1);
2649 HOST_WIDE_INT bval
= INTVAL (b
);
2650 HOST_WIDE_INT cval
= INTVAL (c
);
2653 = simplify_binary_operation (AND
, mode
,
2654 simplify_gen_unary (NOT
, mode
, a
, mode
),
2656 if ((~cval
& bval
) == 0)
2658 /* Try to simplify ~A&C | ~B&C. */
2659 if (na_c
!= NULL_RTX
)
2660 return simplify_gen_binary (IOR
, mode
, na_c
,
2661 GEN_INT (~bval
& cval
));
2665 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2666 if (na_c
== const0_rtx
)
2668 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2669 GEN_INT (~cval
& bval
));
2670 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2671 GEN_INT (~bval
& cval
));
2676 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2677 comparison if STORE_FLAG_VALUE is 1. */
2678 if (STORE_FLAG_VALUE
== 1
2679 && trueop1
== const1_rtx
2680 && COMPARISON_P (op0
)
2681 && (reversed
= reversed_comparison (op0
, mode
)))
2684 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2685 is (lt foo (const_int 0)), so we can perform the above
2686 simplification if STORE_FLAG_VALUE is 1. */
2688 if (STORE_FLAG_VALUE
== 1
2689 && trueop1
== const1_rtx
2690 && GET_CODE (op0
) == LSHIFTRT
2691 && CONST_INT_P (XEXP (op0
, 1))
2692 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2693 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2695 /* (xor (comparison foo bar) (const_int sign-bit))
2696 when STORE_FLAG_VALUE is the sign bit. */
2697 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2698 && trueop1
== const_true_rtx
2699 && COMPARISON_P (op0
)
2700 && (reversed
= reversed_comparison (op0
, mode
)))
2703 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2709 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2711 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2713 if (HWI_COMPUTABLE_MODE_P (mode
))
2715 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2716 HOST_WIDE_INT nzop1
;
2717 if (CONST_INT_P (trueop1
))
2719 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2720 /* If we are turning off bits already known off in OP0, we need
2722 if ((nzop0
& ~val1
) == 0)
2725 nzop1
= nonzero_bits (trueop1
, mode
);
2726 /* If we are clearing all the nonzero bits, the result is zero. */
2727 if ((nzop1
& nzop0
) == 0
2728 && !side_effects_p (op0
) && !side_effects_p (op1
))
2729 return CONST0_RTX (mode
);
2731 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2732 && GET_MODE_CLASS (mode
) != MODE_CC
)
2735 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2736 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2737 && ! side_effects_p (op0
)
2738 && GET_MODE_CLASS (mode
) != MODE_CC
)
2739 return CONST0_RTX (mode
);
2741 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2742 there are no nonzero bits of C outside of X's mode. */
2743 if ((GET_CODE (op0
) == SIGN_EXTEND
2744 || GET_CODE (op0
) == ZERO_EXTEND
)
2745 && CONST_INT_P (trueop1
)
2746 && HWI_COMPUTABLE_MODE_P (mode
)
2747 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2748 & UINTVAL (trueop1
)) == 0)
2750 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2751 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2752 gen_int_mode (INTVAL (trueop1
),
2754 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2757 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2758 we might be able to further simplify the AND with X and potentially
2759 remove the truncation altogether. */
2760 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2762 rtx x
= XEXP (op0
, 0);
2763 enum machine_mode xmode
= GET_MODE (x
);
2764 tem
= simplify_gen_binary (AND
, xmode
, x
,
2765 gen_int_mode (INTVAL (trueop1
), xmode
));
2766 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2769 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2770 if (GET_CODE (op0
) == IOR
2771 && CONST_INT_P (trueop1
)
2772 && CONST_INT_P (XEXP (op0
, 1)))
2774 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2775 return simplify_gen_binary (IOR
, mode
,
2776 simplify_gen_binary (AND
, mode
,
2777 XEXP (op0
, 0), op1
),
2778 gen_int_mode (tmp
, mode
));
2781 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2782 insn (and may simplify more). */
2783 if (GET_CODE (op0
) == XOR
2784 && rtx_equal_p (XEXP (op0
, 0), op1
)
2785 && ! side_effects_p (op1
))
2786 return simplify_gen_binary (AND
, mode
,
2787 simplify_gen_unary (NOT
, mode
,
2788 XEXP (op0
, 1), mode
),
2791 if (GET_CODE (op0
) == XOR
2792 && rtx_equal_p (XEXP (op0
, 1), op1
)
2793 && ! side_effects_p (op1
))
2794 return simplify_gen_binary (AND
, mode
,
2795 simplify_gen_unary (NOT
, mode
,
2796 XEXP (op0
, 0), mode
),
2799 /* Similarly for (~(A ^ B)) & A. */
2800 if (GET_CODE (op0
) == NOT
2801 && GET_CODE (XEXP (op0
, 0)) == XOR
2802 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2803 && ! side_effects_p (op1
))
2804 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2806 if (GET_CODE (op0
) == NOT
2807 && GET_CODE (XEXP (op0
, 0)) == XOR
2808 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2809 && ! side_effects_p (op1
))
2810 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2812 /* Convert (A | B) & A to A. */
2813 if (GET_CODE (op0
) == IOR
2814 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2815 || rtx_equal_p (XEXP (op0
, 1), op1
))
2816 && ! side_effects_p (XEXP (op0
, 0))
2817 && ! side_effects_p (XEXP (op0
, 1)))
2820 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2821 ((A & N) + B) & M -> (A + B) & M
2822 Similarly if (N & M) == 0,
2823 ((A | N) + B) & M -> (A + B) & M
2824 and for - instead of + and/or ^ instead of |.
2825 Also, if (N & M) == 0, then
2826 (A +- N) & M -> A & M. */
2827 if (CONST_INT_P (trueop1
)
2828 && HWI_COMPUTABLE_MODE_P (mode
)
2829 && ~UINTVAL (trueop1
)
2830 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
2831 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2836 pmop
[0] = XEXP (op0
, 0);
2837 pmop
[1] = XEXP (op0
, 1);
2839 if (CONST_INT_P (pmop
[1])
2840 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
2841 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
2843 for (which
= 0; which
< 2; which
++)
2846 switch (GET_CODE (tem
))
2849 if (CONST_INT_P (XEXP (tem
, 1))
2850 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
2851 == UINTVAL (trueop1
))
2852 pmop
[which
] = XEXP (tem
, 0);
2856 if (CONST_INT_P (XEXP (tem
, 1))
2857 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
2858 pmop
[which
] = XEXP (tem
, 0);
2865 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
2867 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
2869 return simplify_gen_binary (code
, mode
, tem
, op1
);
2873 /* (and X (ior (not X) Y) -> (and X Y) */
2874 if (GET_CODE (op1
) == IOR
2875 && GET_CODE (XEXP (op1
, 0)) == NOT
2876 && op0
== XEXP (XEXP (op1
, 0), 0))
2877 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
2879 /* (and (ior (not X) Y) X) -> (and X Y) */
2880 if (GET_CODE (op0
) == IOR
2881 && GET_CODE (XEXP (op0
, 0)) == NOT
2882 && op1
== XEXP (XEXP (op0
, 0), 0))
2883 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
2885 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2891 /* 0/x is 0 (or x&0 if x has side-effects). */
2892 if (trueop0
== CONST0_RTX (mode
))
2894 if (side_effects_p (op1
))
2895 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2899 if (trueop1
== CONST1_RTX (mode
))
2900 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2901 /* Convert divide by power of two into shift. */
2902 if (CONST_INT_P (trueop1
)
2903 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
2904 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
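      /* Illustrative note (added commentary, not upstream code): this is
         the unsigned division case, so e.g. (udiv x (const_int 16)) becomes
         (lshiftrt x (const_int 4)).  Signed division cannot be reduced to a
         plain arithmetic shift because it rounds towards zero.  */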
2908 /* Handle floating point and integers separately. */
2909 if (SCALAR_FLOAT_MODE_P (mode
))
2911 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2912 safe for modes with NaNs, since 0.0 / 0.0 will then be
2913 NaN rather than 0.0. Nor is it safe for modes with signed
2914 zeros, since dividing 0 by a negative number gives -0.0 */
2915 if (trueop0
== CONST0_RTX (mode
)
2916 && !HONOR_NANS (mode
)
2917 && !HONOR_SIGNED_ZEROS (mode
)
2918 && ! side_effects_p (op1
))
2921 if (trueop1
== CONST1_RTX (mode
)
2922 && !HONOR_SNANS (mode
))
2925 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2926 && trueop1
!= CONST0_RTX (mode
))
2929 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2932 if (REAL_VALUES_EQUAL (d
, dconstm1
)
2933 && !HONOR_SNANS (mode
))
2934 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2936 /* Change FP division by a constant into multiplication.
2937 Only do this with -freciprocal-math. */
2938 if (flag_reciprocal_math
2939 && !REAL_VALUES_EQUAL (d
, dconst0
))
2941 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
2942 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
2943 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
2947 else if (SCALAR_INT_MODE_P (mode
))
2949 /* 0/x is 0 (or x&0 if x has side-effects). */
2950 if (trueop0
== CONST0_RTX (mode
)
2951 && !cfun
->can_throw_non_call_exceptions
)
2953 if (side_effects_p (op1
))
2954 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2958 if (trueop1
== CONST1_RTX (mode
))
2959 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2961 if (trueop1
== constm1_rtx
)
2963 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2964 return simplify_gen_unary (NEG
, mode
, x
, mode
);
2970 /* 0%x is 0 (or x&0 if x has side-effects). */
2971 if (trueop0
== CONST0_RTX (mode
))
2973 if (side_effects_p (op1
))
2974 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
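      /* Illustrative note (added commentary, not upstream code): this is
         the unsigned modulus (UMOD) case, so e.g. (umod x (const_int 8))
         becomes (and x (const_int 7)).  */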
2992 /* 0%x is 0 (or x&0 if x has side-effects). */
2993 if (trueop0
== CONST0_RTX (mode
))
2995 if (side_effects_p (op1
))
2996 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2999 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3000 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3002 if (side_effects_p (op0
))
3003 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3004 return CONST0_RTX (mode
);
3011 if (trueop1
== CONST0_RTX (mode
))
3013 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3015 /* Rotating ~0 always results in ~0. */
3016 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3017 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3018 && ! side_effects_p (op1
))
3021 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3023 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
3024 if (val
!= INTVAL (op1
))
3025 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3032 if (trueop1
== CONST0_RTX (mode
))
3034 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3036 goto canonicalize_shift
;
3039 if (trueop1
== CONST0_RTX (mode
))
3041 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3043 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3044 if (GET_CODE (op0
) == CLZ
3045 && CONST_INT_P (trueop1
)
3046 && STORE_FLAG_VALUE
== 1
3047 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3049 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3050 unsigned HOST_WIDE_INT zero_val
= 0;
3052 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3053 && zero_val
== GET_MODE_PRECISION (imode
)
3054 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3055 return simplify_gen_relational (EQ
, mode
, imode
,
3056 XEXP (op0
, 0), const0_rtx
);
3058 goto canonicalize_shift
;
3061 if (width
<= HOST_BITS_PER_WIDE_INT
3062 && mode_signbit_p (mode
, trueop1
)
3063 && ! side_effects_p (op0
))
3065 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3067 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3073 if (width
<= HOST_BITS_PER_WIDE_INT
3074 && CONST_INT_P (trueop1
)
3075 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3076 && ! side_effects_p (op0
))
3078 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3080 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3086 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3088 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3090 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3096 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3098 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3100 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3113 /* ??? There are simplifications that can be done. */
3117 if (!VECTOR_MODE_P (mode
))
3119 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3120 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3121 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3122 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3123 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3125 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3126 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3129 /* Extract a scalar element from a nested VEC_SELECT expression
3130 (with optional nested VEC_CONCAT expression). Some targets
3131 (i386) extract scalar element from a vector using chain of
3132 nested VEC_SELECT expressions. When input operand is a memory
3133 operand, this operation can be simplified to a simple scalar
3134 load from an offseted memory address. */
3135 if (GET_CODE (trueop0
) == VEC_SELECT
)
3137 rtx op0
= XEXP (trueop0
, 0);
3138 rtx op1
= XEXP (trueop0
, 1);
3140 enum machine_mode opmode
= GET_MODE (op0
);
3141 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3142 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3144 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3150 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3151 gcc_assert (i
< n_elts
);
3153 /* Select element, pointed by nested selector. */
3154 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3156 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3157 if (GET_CODE (op0
) == VEC_CONCAT
)
3159 rtx op00
= XEXP (op0
, 0);
3160 rtx op01
= XEXP (op0
, 1);
3162 enum machine_mode mode00
, mode01
;
3163 int n_elts00
, n_elts01
;
3165 mode00
= GET_MODE (op00
);
3166 mode01
= GET_MODE (op01
);
3168 /* Find out number of elements of each operand. */
3169 if (VECTOR_MODE_P (mode00
))
3171 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3172 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3177 if (VECTOR_MODE_P (mode01
))
3179 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3180 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3185 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3187 /* Select correct operand of VEC_CONCAT
3188 and adjust selector. */
3189 if (elem
< n_elts01
)
3200 vec
= rtvec_alloc (1);
3201 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3203 tmp
= gen_rtx_fmt_ee (code
, mode
,
3204 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3207 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3208 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3209 return XEXP (trueop0
, 0);
3213 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3214 gcc_assert (GET_MODE_INNER (mode
)
3215 == GET_MODE_INNER (GET_MODE (trueop0
)));
3216 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3218 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3220 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3221 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3222 rtvec v
= rtvec_alloc (n_elts
);
3225 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3226 for (i
= 0; i
< n_elts
; i
++)
3228 rtx x
= XVECEXP (trueop1
, 0, i
);
3230 gcc_assert (CONST_INT_P (x
));
3231 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3235 return gen_rtx_CONST_VECTOR (mode
, v
);
3238 /* If we build {a,b} then permute it, build the result directly. */
3239 if (XVECLEN (trueop1
, 0) == 2
3240 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3241 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3242 && GET_CODE (trueop0
) == VEC_CONCAT
3243 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3244 && GET_MODE (XEXP (trueop0
, 0)) == mode
3245 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3246 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3248 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3249 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3252 gcc_assert (i0
< 4 && i1
< 4);
3253 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3254 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3256 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3260 if (XVECLEN (trueop1
, 0) == 1
3261 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3262 && GET_CODE (trueop0
) == VEC_CONCAT
)
3265 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3267 /* Try to find the element in the VEC_CONCAT. */
3268 while (GET_MODE (vec
) != mode
3269 && GET_CODE (vec
) == VEC_CONCAT
)
3271 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3272 if (offset
< vec_size
)
3273 vec
= XEXP (vec
, 0);
3277 vec
= XEXP (vec
, 1);
3279 vec
= avoid_constant_pool_reference (vec
);
3282 if (GET_MODE (vec
) == mode
)
3289 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3290 ? GET_MODE (trueop0
)
3291 : GET_MODE_INNER (mode
));
3292 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3293 ? GET_MODE (trueop1
)
3294 : GET_MODE_INNER (mode
));
3296 gcc_assert (VECTOR_MODE_P (mode
));
3297 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3298 == GET_MODE_SIZE (mode
));
3300 if (VECTOR_MODE_P (op0_mode
))
3301 gcc_assert (GET_MODE_INNER (mode
)
3302 == GET_MODE_INNER (op0_mode
));
3304 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3306 if (VECTOR_MODE_P (op1_mode
))
3307 gcc_assert (GET_MODE_INNER (mode
)
3308 == GET_MODE_INNER (op1_mode
));
3310 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3312 if ((GET_CODE (trueop0
) == CONST_VECTOR
3313 || CONST_INT_P (trueop0
) || CONST_DOUBLE_P (trueop0
))
3314 && (GET_CODE (trueop1
) == CONST_VECTOR
3315 || CONST_INT_P (trueop1
) || CONST_DOUBLE_P (trueop1
)))
3317 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3318 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3319 rtvec v
= rtvec_alloc (n_elts
);
3321 unsigned in_n_elts
= 1;
3323 if (VECTOR_MODE_P (op0_mode
))
3324 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3325 for (i
= 0; i
< n_elts
; i
++)
3329 if (!VECTOR_MODE_P (op0_mode
))
3330 RTVEC_ELT (v
, i
) = trueop0
;
3332 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3336 if (!VECTOR_MODE_P (op1_mode
))
3337 RTVEC_ELT (v
, i
) = trueop1
;
3339 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3344 return gen_rtx_CONST_VECTOR (mode
, v
);
3357 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
3360 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
3362 unsigned int width
= GET_MODE_PRECISION (mode
);
3364 if (VECTOR_MODE_P (mode
)
3365 && code
!= VEC_CONCAT
3366 && GET_CODE (op0
) == CONST_VECTOR
3367 && GET_CODE (op1
) == CONST_VECTOR
)
3369 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3370 enum machine_mode op0mode
= GET_MODE (op0
);
3371 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3372 enum machine_mode op1mode
= GET_MODE (op1
);
3373 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3374 rtvec v
= rtvec_alloc (n_elts
);
3377 gcc_assert (op0_n_elts
== n_elts
);
3378 gcc_assert (op1_n_elts
== n_elts
);
3379 for (i
= 0; i
< n_elts
; i
++)
3381 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3382 CONST_VECTOR_ELT (op0
, i
),
3383 CONST_VECTOR_ELT (op1
, i
));
3386 RTVEC_ELT (v
, i
) = x
;
3389 return gen_rtx_CONST_VECTOR (mode
, v
);
3392 if (VECTOR_MODE_P (mode
)
3393 && code
== VEC_CONCAT
3394 && (CONST_INT_P (op0
)
3395 || GET_CODE (op0
) == CONST_FIXED
3396 || CONST_DOUBLE_P (op0
))
3397 && (CONST_INT_P (op1
)
3398 || CONST_DOUBLE_P (op1
)
3399 || GET_CODE (op1
) == CONST_FIXED
))
3401 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3402 rtvec v
= rtvec_alloc (n_elts
);
3404 gcc_assert (n_elts
>= 2);
3407 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3408 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3410 RTVEC_ELT (v
, 0) = op0
;
3411 RTVEC_ELT (v
, 1) = op1
;
3415 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3416 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3419 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3420 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3421 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3423 for (i
= 0; i
< op0_n_elts
; ++i
)
3424 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3425 for (i
= 0; i
< op1_n_elts
; ++i
)
3426 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3429 return gen_rtx_CONST_VECTOR (mode
, v
);
3432 if (SCALAR_FLOAT_MODE_P (mode
)
3433 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3434 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3435 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3446 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3448 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3450 for (i
= 0; i
< 4; i
++)
3467 real_from_target (&r
, tmp0
, mode
);
3468 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3472 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3475 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3476 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3477 real_convert (&f0
, mode
, &f0
);
3478 real_convert (&f1
, mode
, &f1
);
3480 if (HONOR_SNANS (mode
)
3481 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3485 && REAL_VALUES_EQUAL (f1
, dconst0
)
3486 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3489 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3490 && flag_trapping_math
3491 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3493 int s0
= REAL_VALUE_NEGATIVE (f0
);
3494 int s1
= REAL_VALUE_NEGATIVE (f1
);
3499 /* Inf + -Inf = NaN plus exception. */
3504 /* Inf - Inf = NaN plus exception. */
3509 /* Inf / Inf = NaN plus exception. */
3516 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3517 && flag_trapping_math
3518 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3519 || (REAL_VALUE_ISINF (f1
)
3520 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3521 /* Inf * 0 = NaN plus exception. */
3524 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3526 real_convert (&result
, mode
, &value
);
3528 /* Don't constant fold this floating point operation if
3529 the result has overflowed and flag_trapping_math. */
3531 if (flag_trapping_math
3532 && MODE_HAS_INFINITIES (mode
)
3533 && REAL_VALUE_ISINF (result
)
3534 && !REAL_VALUE_ISINF (f0
)
3535 && !REAL_VALUE_ISINF (f1
))
3536 /* Overflow plus exception. */
      /* Don't constant fold this floating point operation if the
         result may depend upon the run-time rounding mode and
         flag_rounding_math is set, or if GCC's software emulation
         is unable to accurately represent the result.  */
3544 if ((flag_rounding_math
3545 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3546 && (inexact
|| !real_identical (&result
, &value
)))
3549 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3553 /* We can fold some multi-word operations. */
3554 if (GET_MODE_CLASS (mode
) == MODE_INT
3555 && width
== HOST_BITS_PER_DOUBLE_INT
3556 && (CONST_DOUBLE_AS_INT_P (op0
) || CONST_INT_P (op0
))
3557 && (CONST_DOUBLE_AS_INT_P (op1
) || CONST_INT_P (op1
)))
3559 double_int o0
, o1
, res
, tmp
;
3561 o0
= rtx_to_double_int (op0
);
3562 o1
= rtx_to_double_int (op1
);
3567 /* A - B == A + (-B). */
3568 o1
= double_int_neg (o1
);
3570 /* Fall through.... */
3573 res
= double_int_add (o0
, o1
);
3577 res
= double_int_mul (o0
, o1
);
3581 if (div_and_round_double (TRUNC_DIV_EXPR
, 0,
3582 o0
.low
, o0
.high
, o1
.low
, o1
.high
,
3583 &res
.low
, &res
.high
,
3584 &tmp
.low
, &tmp
.high
))
3589 if (div_and_round_double (TRUNC_DIV_EXPR
, 0,
3590 o0
.low
, o0
.high
, o1
.low
, o1
.high
,
3591 &tmp
.low
, &tmp
.high
,
3592 &res
.low
, &res
.high
))
3597 if (div_and_round_double (TRUNC_DIV_EXPR
, 1,
3598 o0
.low
, o0
.high
, o1
.low
, o1
.high
,
3599 &res
.low
, &res
.high
,
3600 &tmp
.low
, &tmp
.high
))
3605 if (div_and_round_double (TRUNC_DIV_EXPR
, 1,
3606 o0
.low
, o0
.high
, o1
.low
, o1
.high
,
3607 &tmp
.low
, &tmp
.high
,
3608 &res
.low
, &res
.high
))
3613 res
= double_int_and (o0
, o1
);
3617 res
= double_int_ior (o0
, o1
);
3621 res
= double_int_xor (o0
, o1
);
3625 res
= double_int_smin (o0
, o1
);
3629 res
= double_int_smax (o0
, o1
);
3633 res
= double_int_umin (o0
, o1
);
3637 res
= double_int_umax (o0
, o1
);
3640 case LSHIFTRT
: case ASHIFTRT
:
3642 case ROTATE
: case ROTATERT
:
3644 unsigned HOST_WIDE_INT cnt
;
3646 if (SHIFT_COUNT_TRUNCATED
)
3649 o1
.low
&= GET_MODE_PRECISION (mode
) - 1;
3652 if (!double_int_fits_in_uhwi_p (o1
)
3653 || double_int_to_uhwi (o1
) >= GET_MODE_PRECISION (mode
))
3656 cnt
= double_int_to_uhwi (o1
);
3658 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3659 res
= double_int_rshift (o0
, cnt
, GET_MODE_PRECISION (mode
),
3661 else if (code
== ASHIFT
)
3662 res
= double_int_lshift (o0
, cnt
, GET_MODE_PRECISION (mode
),
3664 else if (code
== ROTATE
)
3665 res
= double_int_lrotate (o0
, cnt
, GET_MODE_PRECISION (mode
));
3666 else /* code == ROTATERT */
3667 res
= double_int_rrotate (o0
, cnt
, GET_MODE_PRECISION (mode
));
3675 return immed_double_int_const (res
, mode
);
3678 if (CONST_INT_P (op0
) && CONST_INT_P (op1
)
3679 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3681 /* Get the integer argument values in two forms:
3682 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3684 arg0
= INTVAL (op0
);
3685 arg1
= INTVAL (op1
);
3687 if (width
< HOST_BITS_PER_WIDE_INT
)
3689 arg0
&= GET_MODE_MASK (mode
);
3690 arg1
&= GET_MODE_MASK (mode
);
3693 if (val_signbit_known_set_p (mode
, arg0s
))
3694 arg0s
|= ~GET_MODE_MASK (mode
);
3697 if (val_signbit_known_set_p (mode
, arg1s
))
3698 arg1s
|= ~GET_MODE_MASK (mode
);
3706 /* Compute the value of the arithmetic. */
3711 val
= arg0s
+ arg1s
;
3715 val
= arg0s
- arg1s
;
3719 val
= arg0s
* arg1s
;
3724 || ((unsigned HOST_WIDE_INT
) arg0s
3725 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3728 val
= arg0s
/ arg1s
;
3733 || ((unsigned HOST_WIDE_INT
) arg0s
3734 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3737 val
= arg0s
% arg1s
;
3742 || ((unsigned HOST_WIDE_INT
) arg0s
3743 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3746 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3751 || ((unsigned HOST_WIDE_INT
) arg0s
3752 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3755 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3773 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3774 the value is in range. We can't return any old value for
3775 out-of-range arguments because either the middle-end (via
3776 shift_truncation_mask) or the back-end might be relying on
3777 target-specific knowledge. Nor can we rely on
3778 shift_truncation_mask, since the shift might not be part of an
3779 ashlM3, lshrM3 or ashrM3 instruction. */
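          /* Illustrative note (added commentary, not upstream code): with
             SHIFT_COUNT_TRUNCATED and a 32-bit mode, a count of 33 is folded
             as a count of 1; without it an out-of-range count makes us give
             up rather than guess what the target would do.  */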
3780 if (SHIFT_COUNT_TRUNCATED
)
3781 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3782 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3785 val
= (code
== ASHIFT
3786 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3787 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3789 /* Sign-extend the result for arithmetic right shifts. */
3790 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3791 val
|= ((unsigned HOST_WIDE_INT
) (-1)) << (width
- arg1
);
3799 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3800 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3808 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3809 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3813 /* Do nothing here. */
3817 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3821 val
= ((unsigned HOST_WIDE_INT
) arg0
3822 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3826 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3830 val
= ((unsigned HOST_WIDE_INT
) arg0
3831 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3844 /* ??? There are simplifications that can be done. */
3851 return gen_int_mode (val
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than testing for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
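/* Illustrative note (added commentary, not upstream code): for
   (minus (plus a b) (plus a c)) the routine below collects the operand list
   {+a, +b, -a, -c}, cancels the a/-a pair in its pairwise combination loop,
   and rebuilds the remaining operands as (minus b c).  */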
3866 struct simplify_plus_minus_op_data
3873 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
3877 result
= (commutative_operand_precedence (y
)
3878 - commutative_operand_precedence (x
));
3882 /* Group together equal REGs to do more simplification. */
3883 if (REG_P (x
) && REG_P (y
))
3884 return REGNO (x
) > REGNO (y
);
3890 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3893 struct simplify_plus_minus_op_data ops
[8];
3895 int n_ops
= 2, input_ops
= 2;
3896 int changed
, n_constants
= 0, canonicalized
= 0;
3899 memset (ops
, 0, sizeof ops
);
3901 /* Set up the two operands and then expand them until nothing has been
3902 changed. If we run out of room in our array, give up; this should
3903 almost never happen. */
3908 ops
[1].neg
= (code
== MINUS
);
3914 for (i
= 0; i
< n_ops
; i
++)
3916 rtx this_op
= ops
[i
].op
;
3917 int this_neg
= ops
[i
].neg
;
3918 enum rtx_code this_code
= GET_CODE (this_op
);
3927 ops
[n_ops
].op
= XEXP (this_op
, 1);
3928 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3931 ops
[i
].op
= XEXP (this_op
, 0);
3934 canonicalized
|= this_neg
;
3938 ops
[i
].op
= XEXP (this_op
, 0);
3939 ops
[i
].neg
= ! this_neg
;
3946 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3947 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3948 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3950 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3951 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3952 ops
[n_ops
].neg
= this_neg
;
3960 /* ~a -> (-a - 1) */
3963 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
3964 ops
[n_ops
++].neg
= this_neg
;
3965 ops
[i
].op
= XEXP (this_op
, 0);
3966 ops
[i
].neg
= !this_neg
;
3976 ops
[i
].op
= neg_const_int (mode
, this_op
);
3990 if (n_constants
> 1)
3993 gcc_assert (n_ops
>= 2);
3995 /* If we only have two operands, we can avoid the loops. */
3998 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4001 /* Get the two operands. Be careful with the order, especially for
4002 the cases where code == MINUS. */
4003 if (ops
[0].neg
&& ops
[1].neg
)
4005 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4008 else if (ops
[0].neg
)
4019 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4022 /* Now simplify each pair of operands until nothing changes. */
4025 /* Insertion sort is good enough for an eight-element array. */
4026 for (i
= 1; i
< n_ops
; i
++)
4028 struct simplify_plus_minus_op_data save
;
4030 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
4036 ops
[j
+ 1] = ops
[j
];
4037 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
4042 for (i
= n_ops
- 1; i
> 0; i
--)
4043 for (j
= i
- 1; j
>= 0; j
--)
4045 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4046 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4048 if (lhs
!= 0 && rhs
!= 0)
4050 enum rtx_code ncode
= PLUS
;
4056 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4058 else if (swap_commutative_operands_p (lhs
, rhs
))
4059 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4061 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4062 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4064 rtx tem_lhs
, tem_rhs
;
4066 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4067 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4068 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
4070 if (tem
&& !CONSTANT_P (tem
))
4071 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4074 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4076 /* Reject "simplifications" that just wrap the two
4077 arguments in a CONST. Failure to do so can result
4078 in infinite recursion with simplify_binary_operation
4079 when it calls us to simplify CONST operations. */
4081 && ! (GET_CODE (tem
) == CONST
4082 && GET_CODE (XEXP (tem
, 0)) == ncode
4083 && XEXP (XEXP (tem
, 0), 0) == lhs
4084 && XEXP (XEXP (tem
, 0), 1) == rhs
))
4087 if (GET_CODE (tem
) == NEG
)
4088 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4089 if (CONST_INT_P (tem
) && lneg
)
4090 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4094 ops
[j
].op
= NULL_RTX
;
4101 /* If nothing changed, fail. */
4105 /* Pack all the operands to the lower-numbered entries. */
4106 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4116 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4118 && CONST_INT_P (ops
[1].op
)
4119 && CONSTANT_P (ops
[0].op
)
4121 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4123 /* We suppressed creation of trivial CONST expressions in the
4124 combination loop to avoid recursion. Create one manually now.
4125 The combination loop should have ensured that there is exactly
4126 one CONST_INT, and the sort will have ensured that it is last
4127 in the array and that any other constant will be next-to-last. */
4130 && CONST_INT_P (ops
[n_ops
- 1].op
)
4131 && CONSTANT_P (ops
[n_ops
- 2].op
))
4133 rtx value
= ops
[n_ops
- 1].op
;
4134 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4135 value
= neg_const_int (mode
, value
);
4136 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4141 /* Put a non-negated operand first, if possible. */
4143 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4146 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4155 /* Now make the result by performing the requested operations. */
4157 for (i
= 1; i
< n_ops
; i
++)
4158 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4159 mode
, result
, ops
[i
].op
);
4164 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4166 plus_minus_operand_p (const_rtx x
)
4168 return GET_CODE (x
) == PLUS
4169 || GET_CODE (x
) == MINUS
4170 || (GET_CODE (x
) == CONST
4171 && GET_CODE (XEXP (x
, 0)) == PLUS
4172 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4173 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands must
   not both be VOIDmode as well.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
4185 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
4186 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
4188 rtx tem
, trueop0
, trueop1
;
4190 if (cmp_mode
== VOIDmode
)
4191 cmp_mode
= GET_MODE (op0
);
4192 if (cmp_mode
== VOIDmode
)
4193 cmp_mode
= GET_MODE (op1
);
4195 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4198 if (SCALAR_FLOAT_MODE_P (mode
))
4200 if (tem
== const0_rtx
)
4201 return CONST0_RTX (mode
);
4202 #ifdef FLOAT_STORE_FLAG_VALUE
4204 REAL_VALUE_TYPE val
;
4205 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4206 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
4212 if (VECTOR_MODE_P (mode
))
4214 if (tem
== const0_rtx
)
4215 return CONST0_RTX (mode
);
4216 #ifdef VECTOR_STORE_FLAG_VALUE
4221 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4222 if (val
== NULL_RTX
)
4224 if (val
== const1_rtx
)
4225 return CONST1_RTX (mode
);
4227 units
= GET_MODE_NUNITS (mode
);
4228 v
= rtvec_alloc (units
);
4229 for (i
= 0; i
< units
; i
++)
4230 RTVEC_ELT (v
, i
) = val
;
4231 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
4241 /* For the following tests, ensure const0_rtx is op1. */
4242 if (swap_commutative_operands_p (op0
, op1
)
4243 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4244 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
4246 /* If op0 is a compare, extract the comparison arguments from it. */
4247 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4248 return simplify_gen_relational (code
, mode
, VOIDmode
,
4249 XEXP (op0
, 0), XEXP (op0
, 1));
4251 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4255 trueop0
= avoid_constant_pool_reference (op0
);
4256 trueop1
= avoid_constant_pool_reference (op1
);
4257 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
4261 /* This part of simplify_relational_operation is only used when CMP_MODE
4262 is not in class MODE_CC (i.e. it is a real comparison).
4264 MODE is the mode of the result, while CMP_MODE specifies in which
4265 mode the comparison is done in, so it is the mode of the operands. */
4268 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
4269 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
4271 enum rtx_code op0code
= GET_CODE (op0
);
4273 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4275 /* If op0 is a comparison, extract the comparison arguments
4279 if (GET_MODE (op0
) == mode
)
4280 return simplify_rtx (op0
);
4282 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4283 XEXP (op0
, 0), XEXP (op0
, 1));
4285 else if (code
== EQ
)
4287 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
4288 if (new_code
!= UNKNOWN
)
4289 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4290 XEXP (op0
, 0), XEXP (op0
, 1));
4294 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4295 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4296 if ((code
== LTU
|| code
== GEU
)
4297 && GET_CODE (op0
) == PLUS
4298 && CONST_INT_P (XEXP (op0
, 1))
4299 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4300 || rtx_equal_p (op1
, XEXP (op0
, 1))))
4303 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4304 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4305 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4308 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4309 if ((code
== LTU
|| code
== GEU
)
4310 && GET_CODE (op0
) == PLUS
4311 && rtx_equal_p (op1
, XEXP (op0
, 1))
4312 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4313 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4314 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4315 copy_rtx (XEXP (op0
, 0)));
4317 if (op1
== const0_rtx
)
4319 /* Canonicalize (GTU x 0) as (NE x 0). */
4321 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4322 /* Canonicalize (LEU x 0) as (EQ x 0). */
4324 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4326 else if (op1
== const1_rtx
)
4331 /* Canonicalize (GE x 1) as (GT x 0). */
4332 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4335 /* Canonicalize (GEU x 1) as (NE x 0). */
4336 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4339 /* Canonicalize (LT x 1) as (LE x 0). */
4340 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4343 /* Canonicalize (LTU x 1) as (EQ x 0). */
4344 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4350 else if (op1
== constm1_rtx
)
4352 /* Canonicalize (LE x -1) as (LT x 0). */
4354 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4355 /* Canonicalize (GT x -1) as (GE x 0). */
4357 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4360 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4361 if ((code
== EQ
|| code
== NE
)
4362 && (op0code
== PLUS
|| op0code
== MINUS
)
4364 && CONSTANT_P (XEXP (op0
, 1))
4365 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4367 rtx x
= XEXP (op0
, 0);
4368 rtx c
= XEXP (op0
, 1);
4369 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4370 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
4372 /* Detect an infinite recursive condition, where we oscillate at this
4373 simplification case between:
4374 A + B == C <---> C - B == A,
4375 where A, B, and C are all constants with non-simplifiable expressions,
4376 usually SYMBOL_REFs. */
4377 if (GET_CODE (tem
) == invcode
4379 && rtx_equal_p (c
, XEXP (tem
, 1)))
4382 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
4385 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4386 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4388 && op1
== const0_rtx
4389 && GET_MODE_CLASS (mode
) == MODE_INT
4390 && cmp_mode
!= VOIDmode
4391 /* ??? Work-around BImode bugs in the ia64 backend. */
4393 && cmp_mode
!= BImode
4394 && nonzero_bits (op0
, cmp_mode
) == 1
4395 && STORE_FLAG_VALUE
== 1)
4396 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
4397 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
4398 : lowpart_subreg (mode
, op0
, cmp_mode
);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
      && (CONST_INT_P (XEXP (op0, 1))
          || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));
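  /* With illustrative constants: (eq (xor x 5) 3) becomes (eq x 6),
     since x ^ 5 == 3 is equivalent to x == (5 ^ 3).  */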
  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}

enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
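/* As an illustration: comparing the SImode constants -1 and 1 yields
   KNOWN_RESULTS == (CMP_LT | CMP_GTU), because -1 < 1 as a signed value but
   0xffffffff > 1 as an unsigned value.  From those bits the switch above
   answers LT and GTU with const_true_rtx, and GE and LEU with const0_rtx.  */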
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }
  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);
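  /* For example, testing (plus (reg X) (const_int 4)) against (reg X) for
     EQ: the MINUS above folds to (const_int 4), and, assuming the folded
     difference passes the nonzero-address guard, the recursive call then
     decides the comparison against zero (here: const0_rtx).  */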
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
      && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (CONST_DOUBLE_AS_INT_P (trueop0))
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (CONST_DOUBLE_AS_INT_P (trueop1))
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= GET_MODE_MASK (mode);
          l1u &= GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l0s))
            l0s |= ~GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l1s))
            l1s |= ~GET_MODE_MASK (mode);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
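  /* Worked example with illustrative values: comparing 5 with -3 in a
     32-bit mode gives l0u == 5 and l1u == 0xfffffffd after masking,
     h0u == h1u == 0, and HWI_SIGN_EXTEND makes h0s == 0, h1s == -1.
     The signed test therefore reports CMP_GT and the unsigned test CMP_LTU,
     so (gt 5 -3) folds to true while (gtu 5 -3) folds to false.  */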
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (mmin < 0)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
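  /* For instance, if TRUEOP0 is a zero-extended byte (nonzero_bits at most
     0xff), the reduced range above is [0, 255], so (ltu x (const_int 256))
     folds to const_true_rtx and (gtu x (const_int 255)) folds to
     const0_rtx.  The constants are only illustrative.  */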
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;
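      /* E.g. (fma (neg a) (neg b) c) becomes (fma a b c) because the two
         negations cancel, and (fma a (neg b) c) is reordered so that the
         negated operand comes first, as the comments above describe.  */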
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
          else
            val >>= op2val;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
                     != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
            }

          return gen_int_mode (val, mode);
        }
      break;
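      /* Worked example, assuming little-endian bit numbering
         (!BITS_BIG_ENDIAN): extracting a 3-bit field at position 1 from the
         constant 0x2d (binary 101101) shifts right by 1 and masks to get
         0b110 == 6; ZERO_EXTRACT returns 6, while SIGN_EXTRACT sees the top
         bit of the field set and returns -2.  */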
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }
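          /* For example, with STORE_FLAG_VALUE == 1,
             (if_then_else (lt x y) (const_int 1) (const_int 0)) collapses to
             (lt x y), and with the two constants swapped the reversed
             comparison (ge x y) is used instead.  */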
          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              else
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;
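      /* For the constant-folding case above: with four elements and
         op2 == 0b0101 (illustrative), elements 0 and 2 of the result are
         taken from op0 and elements 1 and 3 from op1.  */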
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
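/* For instance, on a little-endian target (illustrative),
   (subreg:QI (const_int 0x12345678) 0) selects the least significant byte
   and yields (const_int 0x78), while byte 3 yields (const_int 0x12).  */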
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              unsigned char extend = 0;
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }

              if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
                extend = -1;
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = extend;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                    << (i - HOST_BITS_PER_WIDE_INT);

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || CONST_DOUBLE_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis, which cannot
             grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
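  /* E.g. (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) (const_int 2)) 0)
     becomes (lshiftrt:QI (x:QI) (const_int 2)); the low byte of the wider
     shift only ever sees bits that came from X itself.  */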
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
      && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }
  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))