/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
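
/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (-5) yields
   (HOST_WIDE_INT) -1 and HWI_SIGN_EXTEND (5) yields (HOST_WIDE_INT) 0, so a
   single CONST_INT can be widened to a (low, high) pair with
   lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv), as is done further below.  */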
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
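
/* For example, negating (const_int -128) in QImode would overflow the 8-bit
   range, so gen_int_mode truncates the result back to QImode and
   neg_const_int returns (const_int -128) again.  */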
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
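
/* For instance, with a 64-bit HOST_WIDE_INT, mode_signbit_p (SImode, x) is
   true exactly when X is a CONST_INT whose low 32 bits are 0x80000000;
   constants wider than a HOST_WIDE_INT go through the CONST_DOUBLE branch
   above.  */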
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
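
/* As a usage example, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   simply returns X because the addition folds away, while an unfoldable
   request such as a PLUS of two distinct hard registers falls through to
   gen_rtx_fmt_ee and yields the plain (plus:SI ...) rtx.  */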
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
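
/* A typical use is in the simplify_*_operation entry points below: operands
   are first passed through avoid_constant_pool_reference so that, for
   instance, a MEM loading a pool-resident CONST_DOUBLE participates in
   constant folding as the CONST_DOUBLE itself rather than as a memory
   reference.  */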
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
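
/* For example, replacing (reg A) with (const_int 8) in
   (plus:SI (reg A) (const_int 4)) via simplify_replace_rtx yields
   (const_int 12), because the rebuilt PLUS is folded by
   simplify_gen_binary.  */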
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
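
/* For example, simplify_unary_operation (NEG, SImode, const1_rtx, SImode)
   folds to (const_int -1) through the constant path, whereas NEG of a plain
   register returns 0 unless simplify_unary_operation_1 finds a pattern such
   as (neg (neg X)) to rewrite.  */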
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
596 /* (not (not X)) == X. */
597 if (GET_CODE (op
) == NOT
)
600 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
601 comparison is all ones. */
602 if (COMPARISON_P (op
)
603 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
604 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
605 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
606 XEXP (op
, 0), XEXP (op
, 1));
608 /* (not (plus X -1)) can become (neg X). */
609 if (GET_CODE (op
) == PLUS
610 && XEXP (op
, 1) == constm1_rtx
)
611 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
613 /* Similarly, (not (neg X)) is (plus X -1). */
614 if (GET_CODE (op
) == NEG
)
615 return plus_constant (XEXP (op
, 0), -1);
617 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
618 if (GET_CODE (op
) == XOR
619 && CONST_INT_P (XEXP (op
, 1))
620 && (temp
= simplify_unary_operation (NOT
, mode
,
621 XEXP (op
, 1), mode
)) != 0)
622 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
624 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
625 if (GET_CODE (op
) == PLUS
626 && CONST_INT_P (XEXP (op
, 1))
627 && mode_signbit_p (mode
, XEXP (op
, 1))
628 && (temp
= simplify_unary_operation (NOT
, mode
,
629 XEXP (op
, 1), mode
)) != 0)
630 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
638 if (GET_CODE (op
) == ASHIFT
639 && XEXP (op
, 0) == const1_rtx
)
641 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
642 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
645 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
646 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
647 so we can perform the above simplification. */
649 if (STORE_FLAG_VALUE
== -1
650 && GET_CODE (op
) == ASHIFTRT
651 && GET_CODE (XEXP (op
, 1))
652 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
653 return simplify_gen_relational (GE
, mode
, VOIDmode
,
654 XEXP (op
, 0), const0_rtx
);
657 if (GET_CODE (op
) == SUBREG
658 && subreg_lowpart_p (op
)
659 && (GET_MODE_SIZE (GET_MODE (op
))
660 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
661 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
662 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
664 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
667 x
= gen_rtx_ROTATE (inner_mode
,
668 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
670 XEXP (SUBREG_REG (op
), 1));
671 return rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         recognized.  */
679 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
681 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
682 enum machine_mode op_mode
;
684 op_mode
= GET_MODE (in1
);
685 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
687 op_mode
= GET_MODE (in2
);
688 if (op_mode
== VOIDmode
)
690 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
692 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
695 in2
= in1
; in1
= tem
;
698 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
704 /* (neg (neg X)) == X. */
705 if (GET_CODE (op
) == NEG
)
708 /* (neg (plus X 1)) can become (not X). */
709 if (GET_CODE (op
) == PLUS
710 && XEXP (op
, 1) == const1_rtx
)
711 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
713 /* Similarly, (neg (not X)) is (plus X 1). */
714 if (GET_CODE (op
) == NOT
)
715 return plus_constant (XEXP (op
, 0), 1);
717 /* (neg (minus X Y)) can become (minus Y X). This transformation
718 isn't safe for modes with signed zeros, since if X and Y are
719 both +0, (minus Y X) is the same as (minus X Y). If the
720 rounding mode is towards +infinity (or -infinity) then the two
721 expressions will be rounded differently. */
722 if (GET_CODE (op
) == MINUS
723 && !HONOR_SIGNED_ZEROS (mode
)
724 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
725 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
727 if (GET_CODE (op
) == PLUS
728 && !HONOR_SIGNED_ZEROS (mode
)
729 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
731 /* (neg (plus A C)) is simplified to (minus -C A). */
732 if (CONST_INT_P (XEXP (op
, 1))
733 || GET_CODE (XEXP (op
, 1)) == CONST_DOUBLE
)
735 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
737 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
740 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
741 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
742 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
745 /* (neg (mult A B)) becomes (mult A (neg B)).
746 This works even for floating-point values. */
747 if (GET_CODE (op
) == MULT
748 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
750 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
751 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
754 /* NEG commutes with ASHIFT since it is multiplication. Only do
755 this if we can then eliminate the NEG (e.g., if the operand
757 if (GET_CODE (op
) == ASHIFT
)
759 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
761 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
764 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
765 C is equal to the width of MODE minus 1. */
766 if (GET_CODE (op
) == ASHIFTRT
767 && CONST_INT_P (XEXP (op
, 1))
768 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
769 return simplify_gen_binary (LSHIFTRT
, mode
,
770 XEXP (op
, 0), XEXP (op
, 1));
772 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
773 C is equal to the width of MODE minus 1. */
774 if (GET_CODE (op
) == LSHIFTRT
775 && CONST_INT_P (XEXP (op
, 1))
776 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
777 return simplify_gen_binary (ASHIFTRT
, mode
,
778 XEXP (op
, 0), XEXP (op
, 1));
780 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
781 if (GET_CODE (op
) == XOR
782 && XEXP (op
, 1) == const1_rtx
783 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
784 return plus_constant (XEXP (op
, 0), -1);
786 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
787 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
788 if (GET_CODE (op
) == LT
789 && XEXP (op
, 1) == const0_rtx
790 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
792 enum machine_mode inner
= GET_MODE (XEXP (op
, 0));
793 int isize
= GET_MODE_PRECISION (inner
);
794 if (STORE_FLAG_VALUE
== 1)
796 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
797 GEN_INT (isize
- 1));
800 if (GET_MODE_PRECISION (mode
) > isize
)
801 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
802 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
804 else if (STORE_FLAG_VALUE
== -1)
806 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
807 GEN_INT (isize
- 1));
810 if (GET_MODE_PRECISION (mode
) > isize
)
811 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
812 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
821 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
824 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
825 if ((GET_CODE (op
) == SIGN_EXTEND
826 || GET_CODE (op
) == ZERO_EXTEND
)
827 && GET_MODE (XEXP (op
, 0)) == mode
)
830 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
831 (OP:SI foo:SI) if OP is NEG or ABS. */
832 if ((GET_CODE (op
) == ABS
833 || GET_CODE (op
) == NEG
)
834 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
835 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
836 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
837 return simplify_gen_unary (GET_CODE (op
), mode
,
838 XEXP (XEXP (op
, 0), 0), mode
);
840 /* (truncate:A (subreg:B (truncate:C X) 0)) is
842 if (GET_CODE (op
) == SUBREG
843 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
844 && subreg_lowpart_p (op
))
845 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (SUBREG_REG (op
), 0),
846 GET_MODE (XEXP (SUBREG_REG (op
), 0)));
      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
855 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
856 ? (num_sign_bit_copies (op
, GET_MODE (op
))
857 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op
))
858 - GET_MODE_PRECISION (mode
)))
859 : truncated_to_mode (mode
, op
))
860 && ! (GET_CODE (op
) == LSHIFTRT
861 && GET_CODE (XEXP (op
, 0)) == MULT
))
862 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
864 /* A truncate of a comparison can be replaced with a subreg if
865 STORE_FLAG_VALUE permits. This is like the previous test,
866 but it works even if the comparison is done in a mode larger
867 than HOST_BITS_PER_WIDE_INT. */
868 if (HWI_COMPUTABLE_MODE_P (mode
)
870 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
871 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
875 if (DECIMAL_FLOAT_MODE_P (mode
))
878 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
879 if (GET_CODE (op
) == FLOAT_EXTEND
880 && GET_MODE (XEXP (op
, 0)) == mode
)
      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
892 if ((GET_CODE (op
) == FLOAT_TRUNCATE
893 && flag_unsafe_math_optimizations
)
894 || GET_CODE (op
) == FLOAT_EXTEND
)
895 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
897 > GET_MODE_SIZE (mode
)
898 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
902 /* (float_truncate (float x)) is (float x) */
903 if (GET_CODE (op
) == FLOAT
904 && (flag_unsafe_math_optimizations
905 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
906 && ((unsigned)significand_size (GET_MODE (op
))
907 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
908 - num_sign_bit_copies (XEXP (op
, 0),
909 GET_MODE (XEXP (op
, 0))))))))
910 return simplify_gen_unary (FLOAT
, mode
,
912 GET_MODE (XEXP (op
, 0)));
914 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
915 (OP:SF foo:SF) if OP is NEG or ABS. */
916 if ((GET_CODE (op
) == ABS
917 || GET_CODE (op
) == NEG
)
918 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
919 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
920 return simplify_gen_unary (GET_CODE (op
), mode
,
921 XEXP (XEXP (op
, 0), 0), mode
);
923 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
924 is (float_truncate:SF x). */
925 if (GET_CODE (op
) == SUBREG
926 && subreg_lowpart_p (op
)
927 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
928 return SUBREG_REG (op
);
932 if (DECIMAL_FLOAT_MODE_P (mode
))
935 /* (float_extend (float_extend x)) is (float_extend x)
937 (float_extend (float x)) is (float x) assuming that double
938 rounding can't happen.
940 if (GET_CODE (op
) == FLOAT_EXTEND
941 || (GET_CODE (op
) == FLOAT
942 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
943 && ((unsigned)significand_size (GET_MODE (op
))
944 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
945 - num_sign_bit_copies (XEXP (op
, 0),
946 GET_MODE (XEXP (op
, 0)))))))
947 return simplify_gen_unary (GET_CODE (op
), mode
,
949 GET_MODE (XEXP (op
, 0)));
954 /* (abs (neg <foo>)) -> (abs <foo>) */
955 if (GET_CODE (op
) == NEG
)
956 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
957 GET_MODE (XEXP (op
, 0)));
      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
961 if (GET_MODE (op
) == VOIDmode
)
964 /* If operand is something known to be positive, ignore the ABS. */
965 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
966 || val_signbit_known_clear_p (GET_MODE (op
),
967 nonzero_bits (op
, GET_MODE (op
))))
970 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
971 if (num_sign_bit_copies (op
, mode
) == GET_MODE_PRECISION (mode
))
972 return gen_rtx_NEG (mode
, op
);
977 /* (ffs (*_extend <X>)) = (ffs <X>) */
978 if (GET_CODE (op
) == SIGN_EXTEND
979 || GET_CODE (op
) == ZERO_EXTEND
)
980 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
981 GET_MODE (XEXP (op
, 0)));
985 switch (GET_CODE (op
))
989 /* (popcount (zero_extend <X>)) = (popcount <X>) */
990 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
991 GET_MODE (XEXP (op
, 0)));
995 /* Rotations don't affect popcount. */
996 if (!side_effects_p (XEXP (op
, 1)))
997 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
998 GET_MODE (XEXP (op
, 0)));
1007 switch (GET_CODE (op
))
1013 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1014 GET_MODE (XEXP (op
, 0)));
1018 /* Rotations don't affect parity. */
1019 if (!side_effects_p (XEXP (op
, 1)))
1020 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1021 GET_MODE (XEXP (op
, 0)));
1030 /* (bswap (bswap x)) -> x. */
1031 if (GET_CODE (op
) == BSWAP
)
1032 return XEXP (op
, 0);
1036 /* (float (sign_extend <X>)) = (float <X>). */
1037 if (GET_CODE (op
) == SIGN_EXTEND
)
1038 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1039 GET_MODE (XEXP (op
, 0)));
1043 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1044 becomes just the MINUS if its mode is MODE. This allows
1045 folding switch statements on machines using casesi (such as
1047 if (GET_CODE (op
) == TRUNCATE
1048 && GET_MODE (XEXP (op
, 0)) == mode
1049 && GET_CODE (XEXP (op
, 0)) == MINUS
1050 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1051 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1052 return XEXP (op
, 0);
1054 /* Extending a widening multiplication should be canonicalized to
1055 a wider widening multiplication. */
1056 if (GET_CODE (op
) == MULT
)
1058 rtx lhs
= XEXP (op
, 0);
1059 rtx rhs
= XEXP (op
, 1);
1060 enum rtx_code lcode
= GET_CODE (lhs
);
1061 enum rtx_code rcode
= GET_CODE (rhs
);
1063 /* Widening multiplies usually extend both operands, but sometimes
1064 they use a shift to extract a portion of a register. */
1065 if ((lcode
== SIGN_EXTEND
1066 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1067 && (rcode
== SIGN_EXTEND
1068 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1070 enum machine_mode lmode
= GET_MODE (lhs
);
1071 enum machine_mode rmode
= GET_MODE (rhs
);
1074 if (lcode
== ASHIFTRT
)
1075 /* Number of bits not shifted off the end. */
1076 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1077 else /* lcode == SIGN_EXTEND */
1078 /* Size of inner mode. */
1079 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1081 if (rcode
== ASHIFTRT
)
1082 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1083 else /* rcode == SIGN_EXTEND */
1084 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
1088 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1089 return simplify_gen_binary
1091 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1092 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1096 /* Check for a sign extension of a subreg of a promoted
1097 variable, where the promotion is sign-extended, and the
1098 target mode is the same as the variable's promotion. */
1099 if (GET_CODE (op
) == SUBREG
1100 && SUBREG_PROMOTED_VAR_P (op
)
1101 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
1102 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1103 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1105 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1106 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1107 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1109 gcc_assert (GET_MODE_BITSIZE (mode
)
1110 > GET_MODE_BITSIZE (GET_MODE (op
)));
1111 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1112 GET_MODE (XEXP (op
, 0)));
1115 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1116 is (sign_extend:M (subreg:O <X>)) if there is mode with
1117 GET_MODE_BITSIZE (N) - I bits.
1118 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1119 is similarly (zero_extend:M (subreg:O <X>)). */
1120 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1121 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1122 && CONST_INT_P (XEXP (op
, 1))
1123 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1124 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1126 enum machine_mode tmode
1127 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1128 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1129 gcc_assert (GET_MODE_BITSIZE (mode
)
1130 > GET_MODE_BITSIZE (GET_MODE (op
)));
1131 if (tmode
!= BLKmode
)
1134 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1135 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1136 ? SIGN_EXTEND
: ZERO_EXTEND
,
1137 mode
, inner
, tmode
);
1141 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
1145 if (target_default_pointer_address_modes_p ()
1146 && ! POINTERS_EXTEND_UNSIGNED
1147 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1149 || (GET_CODE (op
) == SUBREG
1150 && REG_P (SUBREG_REG (op
))
1151 && REG_POINTER (SUBREG_REG (op
))
1152 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1153 return convert_memory_address (Pmode
, op
);
1158 /* Check for a zero extension of a subreg of a promoted
1159 variable, where the promotion is zero-extended, and the
1160 target mode is the same as the variable's promotion. */
1161 if (GET_CODE (op
) == SUBREG
1162 && SUBREG_PROMOTED_VAR_P (op
)
1163 && SUBREG_PROMOTED_UNSIGNED_P (op
) > 0
1164 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1165 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1167 /* Extending a widening multiplication should be canonicalized to
1168 a wider widening multiplication. */
1169 if (GET_CODE (op
) == MULT
)
1171 rtx lhs
= XEXP (op
, 0);
1172 rtx rhs
= XEXP (op
, 1);
1173 enum rtx_code lcode
= GET_CODE (lhs
);
1174 enum rtx_code rcode
= GET_CODE (rhs
);
1176 /* Widening multiplies usually extend both operands, but sometimes
1177 they use a shift to extract a portion of a register. */
1178 if ((lcode
== ZERO_EXTEND
1179 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1180 && (rcode
== ZERO_EXTEND
1181 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1183 enum machine_mode lmode
= GET_MODE (lhs
);
1184 enum machine_mode rmode
= GET_MODE (rhs
);
1187 if (lcode
== LSHIFTRT
)
1188 /* Number of bits not shifted off the end. */
1189 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1190 else /* lcode == ZERO_EXTEND */
1191 /* Size of inner mode. */
1192 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1194 if (rcode
== LSHIFTRT
)
1195 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1196 else /* rcode == ZERO_EXTEND */
1197 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
1201 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1202 return simplify_gen_binary
1204 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1205 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1209 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1210 if (GET_CODE (op
) == ZERO_EXTEND
)
1211 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1212 GET_MODE (XEXP (op
, 0)));
1214 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1215 is (zero_extend:M (subreg:O <X>)) if there is mode with
1216 GET_MODE_BITSIZE (N) - I bits. */
1217 if (GET_CODE (op
) == LSHIFTRT
1218 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1219 && CONST_INT_P (XEXP (op
, 1))
1220 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1221 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1223 enum machine_mode tmode
1224 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1225 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1226 if (tmode
!= BLKmode
)
1229 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1230 return simplify_gen_unary (ZERO_EXTEND
, mode
, inner
, tmode
);
1234 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
1238 if (target_default_pointer_address_modes_p ()
1239 && POINTERS_EXTEND_UNSIGNED
> 0
1240 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1242 || (GET_CODE (op
) == SUBREG
1243 && REG_P (SUBREG_REG (op
))
1244 && REG_POINTER (SUBREG_REG (op
))
1245 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1246 return convert_memory_address (Pmode
, op
);
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);
1267 if (code
== VEC_DUPLICATE
)
1269 gcc_assert (VECTOR_MODE_P (mode
));
1270 if (GET_MODE (op
) != VOIDmode
)
1272 if (!VECTOR_MODE_P (GET_MODE (op
)))
1273 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1275 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1278 if (CONST_INT_P (op
) || GET_CODE (op
) == CONST_DOUBLE
1279 || GET_CODE (op
) == CONST_VECTOR
)
1281 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1282 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1283 rtvec v
= rtvec_alloc (n_elts
);
1286 if (GET_CODE (op
) != CONST_VECTOR
)
1287 for (i
= 0; i
< n_elts
; i
++)
1288 RTVEC_ELT (v
, i
) = op
;
1291 enum machine_mode inmode
= GET_MODE (op
);
1292 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
1293 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1295 gcc_assert (in_n_elts
< n_elts
);
1296 gcc_assert ((n_elts
% in_n_elts
) == 0);
1297 for (i
= 0; i
< n_elts
; i
++)
1298 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1300 return gen_rtx_CONST_VECTOR (mode
, v
);
1304 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1306 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1307 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1308 enum machine_mode opmode
= GET_MODE (op
);
1309 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
1310 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1311 rtvec v
= rtvec_alloc (n_elts
);
1314 gcc_assert (op_n_elts
== n_elts
);
1315 for (i
= 0; i
< n_elts
; i
++)
1317 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1318 CONST_VECTOR_ELT (op
, i
),
1319 GET_MODE_INNER (opmode
));
1322 RTVEC_ELT (v
, i
) = x
;
1324 return gen_rtx_CONST_VECTOR (mode
, v
);
1327 /* The order of these tests is critical so that, for example, we don't
1328 check the wrong mode (input vs. output) for a conversion operation,
1329 such as FIX. At some point, this should be simplified. */
1331 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
1332 && (GET_CODE (op
) == CONST_DOUBLE
|| CONST_INT_P (op
)))
1334 HOST_WIDE_INT hv
, lv
;
1337 if (CONST_INT_P (op
))
1338 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1340 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1342 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
1343 d
= real_value_truncate (mode
, d
);
1344 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1346 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
1347 && (GET_CODE (op
) == CONST_DOUBLE
1348 || CONST_INT_P (op
)))
1350 HOST_WIDE_INT hv
, lv
;
1353 if (CONST_INT_P (op
))
1354 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1356 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1358 if (op_mode
== VOIDmode
)
1360 /* We don't know how to interpret negative-looking numbers in
1361 this case, so don't try to fold those. */
1365 else if (GET_MODE_PRECISION (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
1368 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
1370 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
1371 d
= real_value_truncate (mode
, d
);
1372 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1375 if (CONST_INT_P (op
)
1376 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1378 HOST_WIDE_INT arg0
= INTVAL (op
);
1392 val
= (arg0
>= 0 ? arg0
: - arg0
);
1396 arg0
&= GET_MODE_MASK (mode
);
1397 val
= ffs_hwi (arg0
);
1401 arg0
&= GET_MODE_MASK (mode
);
1402 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1405 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 1;
1409 arg0
&= GET_MODE_MASK (mode
);
1411 val
= GET_MODE_PRECISION (mode
) - 1;
1413 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 2;
1415 val
= GET_MODE_PRECISION (mode
) - floor_log2 (~arg0
) - 2;
1419 arg0
&= GET_MODE_MASK (mode
);
1422 /* Even if the value at zero is undefined, we have to come
1423 up with some replacement. Seems good enough. */
1424 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1425 val
= GET_MODE_PRECISION (mode
);
1428 val
= ctz_hwi (arg0
);
1432 arg0
&= GET_MODE_MASK (mode
);
1435 val
++, arg0
&= arg0
- 1;
1439 arg0
&= GET_MODE_MASK (mode
);
1442 val
++, arg0
&= arg0
- 1;
1451 for (s
= 0; s
< width
; s
+= 8)
1453 unsigned int d
= width
- s
- 8;
1454 unsigned HOST_WIDE_INT byte
;
1455 byte
= (arg0
>> s
) & 0xff;
1466 /* When zero-extending a CONST_INT, we need to know its
1468 gcc_assert (op_mode
!= VOIDmode
);
1469 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1471 /* If we were really extending the mode,
1472 we would have to distinguish between zero-extension
1473 and sign-extension. */
1474 gcc_assert (width
== op_width
);
1477 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1478 val
= arg0
& GET_MODE_MASK (op_mode
);
1484 if (op_mode
== VOIDmode
)
1486 op_width
= GET_MODE_PRECISION (op_mode
);
1487 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1489 /* If we were really extending the mode,
1490 we would have to distinguish between zero-extension
1491 and sign-extension. */
1492 gcc_assert (width
== op_width
);
1495 else if (op_width
< HOST_BITS_PER_WIDE_INT
)
1497 val
= arg0
& GET_MODE_MASK (op_mode
);
1498 if (val_signbit_known_set_p (op_mode
, val
))
1499 val
|= ~GET_MODE_MASK (op_mode
);
1507 case FLOAT_TRUNCATE
:
1519 return gen_int_mode (val
, mode
);
1522 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1523 for a DImode operation on a CONST_INT. */
1524 else if (GET_MODE (op
) == VOIDmode
1525 && width
<= HOST_BITS_PER_WIDE_INT
* 2
1526 && (GET_CODE (op
) == CONST_DOUBLE
1527 || CONST_INT_P (op
)))
1529 unsigned HOST_WIDE_INT l1
, lv
;
1530 HOST_WIDE_INT h1
, hv
;
1532 if (GET_CODE (op
) == CONST_DOUBLE
)
1533 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
1535 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
1545 neg_double (l1
, h1
, &lv
, &hv
);
1550 neg_double (l1
, h1
, &lv
, &hv
);
1560 lv
= HOST_BITS_PER_WIDE_INT
+ ffs_hwi (h1
);
1568 lv
= GET_MODE_PRECISION (mode
) - floor_log2 (h1
) - 1
1569 - HOST_BITS_PER_WIDE_INT
;
1571 lv
= GET_MODE_PRECISION (mode
) - floor_log2 (l1
) - 1;
1572 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1573 lv
= GET_MODE_PRECISION (mode
);
1581 lv
= HOST_BITS_PER_WIDE_INT
+ ctz_hwi (h1
);
1582 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1583 lv
= GET_MODE_PRECISION (mode
);
1611 for (s
= 0; s
< width
; s
+= 8)
1613 unsigned int d
= width
- s
- 8;
1614 unsigned HOST_WIDE_INT byte
;
1616 if (s
< HOST_BITS_PER_WIDE_INT
)
1617 byte
= (l1
>> s
) & 0xff;
1619 byte
= (h1
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1621 if (d
< HOST_BITS_PER_WIDE_INT
)
1624 hv
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1630 /* This is just a change-of-mode, so do nothing. */
1635 gcc_assert (op_mode
!= VOIDmode
);
1637 if (op_width
> HOST_BITS_PER_WIDE_INT
)
1641 lv
= l1
& GET_MODE_MASK (op_mode
);
1645 if (op_mode
== VOIDmode
1646 || op_width
> HOST_BITS_PER_WIDE_INT
)
1650 lv
= l1
& GET_MODE_MASK (op_mode
);
1651 if (val_signbit_known_set_p (op_mode
, lv
))
1652 lv
|= ~GET_MODE_MASK (op_mode
);
1654 hv
= HWI_SIGN_EXTEND (lv
);
1665 return immed_double_const (lv
, hv
, mode
);
1668 else if (GET_CODE (op
) == CONST_DOUBLE
1669 && SCALAR_FLOAT_MODE_P (mode
)
1670 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1672 REAL_VALUE_TYPE d
, t
;
1673 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1678 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1680 real_sqrt (&t
, mode
, &d
);
1684 d
= real_value_abs (&d
);
1687 d
= real_value_negate (&d
);
1689 case FLOAT_TRUNCATE
:
1690 d
= real_value_truncate (mode
, d
);
1693 /* All this does is change the mode, unless changing
1695 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1696 real_convert (&d
, mode
, &d
);
1699 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1706 real_to_target (tmp
, &d
, GET_MODE (op
));
1707 for (i
= 0; i
< 4; i
++)
1709 real_from_target (&d
, tmp
, mode
);
1715 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1718 else if (GET_CODE (op
) == CONST_DOUBLE
1719 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1720 && GET_MODE_CLASS (mode
) == MODE_INT
1721 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
1723 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1724 operators are intentionally left unspecified (to ease implementation
1725 by target backends), for consistency, this routine implements the
1726 same semantics for constant folding as used by the middle-end. */
1728 /* This was formerly used only for non-IEEE float.
1729 eggert@twinsun.com says it is safe for IEEE also. */
1730 HOST_WIDE_INT xh
, xl
, th
, tl
;
1731 REAL_VALUE_TYPE x
, t
;
1732 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1736 if (REAL_VALUE_ISNAN (x
))
1739 /* Test against the signed upper bound. */
1740 if (width
> HOST_BITS_PER_WIDE_INT
)
1742 th
= ((unsigned HOST_WIDE_INT
) 1
1743 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1749 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1751 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1752 if (REAL_VALUES_LESS (t
, x
))
1759 /* Test against the signed lower bound. */
1760 if (width
> HOST_BITS_PER_WIDE_INT
)
1762 th
= (unsigned HOST_WIDE_INT
) (-1)
1763 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1769 tl
= (unsigned HOST_WIDE_INT
) (-1) << (width
- 1);
1771 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1772 if (REAL_VALUES_LESS (x
, t
))
1778 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1782 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1785 /* Test against the unsigned upper bound. */
1786 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
1791 else if (width
>= HOST_BITS_PER_WIDE_INT
)
1793 th
= ((unsigned HOST_WIDE_INT
) 1
1794 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
1800 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
1802 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
1803 if (REAL_VALUES_LESS (t
, x
))
1810 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1816 return immed_double_const (xl
, xh
, mode
);
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
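
/* For example, given OP0 = (plus:SI (reg X) (const_int 4)) and
   OP1 = (const_int 8), the "(a op b) op c" -> "a op (b op c)" attempt folds
   the two constants, and the result is (plus:SI (reg X) (const_int 12)).  */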
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
1931 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1932 when x is NaN, infinite, or finite and nonzero. They aren't
1933 when x is -0 and the rounding mode is not towards -infinity,
1934 since (-0) + 0 is then 0. */
1935 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1938 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1939 transformations are safe even for IEEE. */
1940 if (GET_CODE (op0
) == NEG
)
1941 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1942 else if (GET_CODE (op1
) == NEG
)
1943 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1945 /* (~a) + 1 -> -a */
1946 if (INTEGRAL_MODE_P (mode
)
1947 && GET_CODE (op0
) == NOT
1948 && trueop1
== const1_rtx
)
1949 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1951 /* Handle both-operands-constant cases. We can only add
1952 CONST_INTs to constants since the sum of relocatable symbols
1953 can't be handled by most assemblers. Don't add CONST_INT
1954 to CONST_INT since overflow won't be computed properly if wider
1955 than HOST_BITS_PER_WIDE_INT. */
1957 if ((GET_CODE (op0
) == CONST
1958 || GET_CODE (op0
) == SYMBOL_REF
1959 || GET_CODE (op0
) == LABEL_REF
)
1960 && CONST_INT_P (op1
))
1961 return plus_constant (op0
, INTVAL (op1
));
1962 else if ((GET_CODE (op1
) == CONST
1963 || GET_CODE (op1
) == SYMBOL_REF
1964 || GET_CODE (op1
) == LABEL_REF
)
1965 && CONST_INT_P (op0
))
1966 return plus_constant (op1
, INTVAL (op0
));
1968 /* See if this is something like X * C - X or vice versa or
1969 if the multiplication is written as a shift. If so, we can
1970 distribute and make a new multiply, shift, or maybe just
1971 have X (if C is 2 in the example above). But don't make
1972 something more expensive than we had before. */
1974 if (SCALAR_INT_MODE_P (mode
))
1976 double_int coeff0
, coeff1
;
1977 rtx lhs
= op0
, rhs
= op1
;
1979 coeff0
= double_int_one
;
1980 coeff1
= double_int_one
;
1982 if (GET_CODE (lhs
) == NEG
)
1984 coeff0
= double_int_minus_one
;
1985 lhs
= XEXP (lhs
, 0);
1987 else if (GET_CODE (lhs
) == MULT
1988 && CONST_INT_P (XEXP (lhs
, 1)))
1990 coeff0
= shwi_to_double_int (INTVAL (XEXP (lhs
, 1)));
1991 lhs
= XEXP (lhs
, 0);
1993 else if (GET_CODE (lhs
) == ASHIFT
1994 && CONST_INT_P (XEXP (lhs
, 1))
1995 && INTVAL (XEXP (lhs
, 1)) >= 0
1996 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1998 coeff0
= double_int_setbit (double_int_zero
,
1999 INTVAL (XEXP (lhs
, 1)));
2000 lhs
= XEXP (lhs
, 0);
2003 if (GET_CODE (rhs
) == NEG
)
2005 coeff1
= double_int_minus_one
;
2006 rhs
= XEXP (rhs
, 0);
2008 else if (GET_CODE (rhs
) == MULT
2009 && CONST_INT_P (XEXP (rhs
, 1)))
2011 coeff1
= shwi_to_double_int (INTVAL (XEXP (rhs
, 1)));
2012 rhs
= XEXP (rhs
, 0);
2014 else if (GET_CODE (rhs
) == ASHIFT
2015 && CONST_INT_P (XEXP (rhs
, 1))
2016 && INTVAL (XEXP (rhs
, 1)) >= 0
2017 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2019 coeff1
= double_int_setbit (double_int_zero
,
2020 INTVAL (XEXP (rhs
, 1)));
2021 rhs
= XEXP (rhs
, 0);
2024 if (rtx_equal_p (lhs
, rhs
))
2026 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
2029 bool speed
= optimize_function_for_speed_p (cfun
);
2031 val
= double_int_add (coeff0
, coeff1
);
2032 coeff
= immed_double_int_const (val
, mode
);
2034 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2035 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2040 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2041 if ((CONST_INT_P (op1
)
2042 || GET_CODE (op1
) == CONST_DOUBLE
)
2043 && GET_CODE (op0
) == XOR
2044 && (CONST_INT_P (XEXP (op0
, 1))
2045 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
2046 && mode_signbit_p (mode
, op1
))
2047 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2048 simplify_gen_binary (XOR
, mode
, op1
,
2051 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2052 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2053 && GET_CODE (op0
) == MULT
2054 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2058 in1
= XEXP (XEXP (op0
, 0), 0);
2059 in2
= XEXP (op0
, 1);
2060 return simplify_gen_binary (MINUS
, mode
, op1
,
2061 simplify_gen_binary (MULT
, mode
,
2065 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2066 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2068 if (COMPARISON_P (op0
)
2069 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2070 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2071 && (reversed
= reversed_comparison (op0
, mode
)))
2073 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2075 /* If one of the operands is a PLUS or a MINUS, see if we can
2076 simplify this by the associative law.
2077 Don't use the associative law for floating point.
2078 The inaccuracy makes it nonassociative,
2079 and subtle programs can break if operations are associated. */
2081 if (INTEGRAL_MODE_P (mode
)
2082 && (plus_minus_operand_p (op0
)
2083 || plus_minus_operand_p (op1
))
2084 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2087 /* Reassociate floating point addition only when the user
2088 specifies associative math operations. */
2089 if (FLOAT_MODE_P (mode
)
2090 && flag_associative_math
)
2092 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2099 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2100 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2101 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2102 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2104 rtx xop00
= XEXP (op0
, 0);
2105 rtx xop10
= XEXP (op1
, 0);
2108 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2110 if (REG_P (xop00
) && REG_P (xop10
)
2111 && GET_MODE (xop00
) == GET_MODE (xop10
)
2112 && REGNO (xop00
) == REGNO (xop10
)
2113 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2114 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
2121 /* We can't assume x-x is 0 even with non-IEEE floating point,
2122 but since it is zero except in very strange circumstances, we
2123 will treat it as zero with -ffinite-math-only. */
2124 if (rtx_equal_p (trueop0
, trueop1
)
2125 && ! side_effects_p (op0
)
2126 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2127 return CONST0_RTX (mode
);
2129 /* Change subtraction from zero into negation. (0 - x) is the
2130 same as -x when x is NaN, infinite, or finite and nonzero.
2131 But if the mode has signed zeros, and does not round towards
2132 -infinity, then 0 - 0 is 0, not -0. */
2133 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2134 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2136 /* (-1 - a) is ~a. */
2137 if (trueop0
== constm1_rtx
)
2138 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2140 /* Subtracting 0 has no effect unless the mode has signed zeros
2141 and supports rounding towards -infinity. In such a case,
2143 if (!(HONOR_SIGNED_ZEROS (mode
)
2144 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2145 && trueop1
== CONST0_RTX (mode
))
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, negcoeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          negcoeff1 = double_int_minus_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = double_int_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1 = double_int_setbit (double_int_zero,
                                             INTVAL (XEXP (rhs, 1)));
              negcoeff1 = double_int_neg (negcoeff1);
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, negcoeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }
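      /* For instance, (minus (mult X (const_int 3)) X) has coefficients
         3 and -1 and is rewritten as (mult X (const_int 2)) when that is
         no more expensive than the original expression.  */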
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;
      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
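      /* Example of the power-of-two rule above:
         (mult:SI X (const_int 8)) becomes (ashift:SI X (const_int 3)).  */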
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (CONST_INT_P (trueop1)
          && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;
      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));
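      /* For instance, in SImode
         (ior (ashift X (const_int 24)) (lshiftrt X (const_int 8)))
         becomes (rotate X (const_int 24)), since 24 + 8 is the mode
         precision.  */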
      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (op1)
          && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (UINTVAL (XEXP (op0, 1))
                                                    & ~UINTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (CONST_INT_P (trueop1)
          && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
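      /* E.g. (xor:SI X (const_int -2147483648)) becomes
         (plus:SI X (const_int -2147483648)); adding the sign bit flips
         only that bit, so XOR and PLUS agree for this constant.  */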
      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          rtx a = XEXP (op0, 0);
          rtx b = XEXP (op0, 1);
          rtx c = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);

          rtx na_c
            = simplify_binary_operation (AND, mode,
                                         simplify_gen_unary (NOT, mode, a, mode),
                                         c);
          if ((~cval & bval) == 0)
            {
              /* Try to simplify ~A&C | ~B&C.  */
              if (na_c != NULL_RTX)
                return simplify_gen_binary (IOR, mode, na_c,
                                            GEN_INT (~bval & cval));
            }
          else
            {
              /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
              if (na_c == const0_rtx)
                {
                  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
                                                    GEN_INT (~cval & bval));
                  return simplify_gen_binary (IOR, mode, a_nc_b,
                                              GEN_INT (~bval & cval));
                }
            }
        }
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (HWI_COMPUTABLE_MODE_P (mode))
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & UINTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          enum machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }
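      /* For instance, (and:SI (zero_extend:SI X:QI) (const_int 127))
         becomes (zero_extend:SI (and:QI X:QI (const_int 127))), since
         127 has no bits outside QImode.  */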
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0), op1),
                                      gen_int_mode (tmp, mode));
        }

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ~UINTVAL (trueop1)
          && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
                      == UINTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }

      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && op0 == XEXP (XEXP (op1, 0), 0))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && op1 == XEXP (XEXP (op0, 0), 0))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
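      /* E.g. (udiv:SI X (const_int 4)) becomes
         (lshiftrt:SI X (const_int 2)).  */
      break;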
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
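      /* Example of the unsigned-modulus rule above:
         (umod:SI X (const_int 16)) becomes (and:SI X (const_int 15)).  */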
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT)width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_PRECISION (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));
          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract scalar element from a vector using chain of
             nested VEC_SELECT expressions.  When input operand is a memory
             operand, this operation can be simplified to a simple scalar
             load from an offseted memory address.  */
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              enum machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select element, pointed by nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  enum machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select correct operand of VEC_CONCAT
                     and adjust selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }

          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }

      if (XVECLEN (trueop1, 0) == 1
          && CONST_INT_P (XVECEXP (trueop1, 0, 0))
          && GET_CODE (trueop0) == VEC_CONCAT)
        {
          rtx vec = trueop0;
          int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)
            {
              HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
              if (offset < vec_size)
                vec = XEXP (vec, 0);
              else
                {
                  offset -= vec_size;
                  vec = XEXP (vec, 1);
                }
              vec = avoid_constant_pool_reference (vec);
            }

          if (GET_MODE (vec) == mode)
            return vec;
        }

      return 0;
    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_INT_P (trueop0)
             || GET_CODE (trueop0) == CONST_DOUBLE)
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_INT_P (trueop1)
                || GET_CODE (trueop1) == CONST_DOUBLE))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
      }
      return 0;

    default:
      break;
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
          || GET_CODE (op0) == CONST_DOUBLE
          || GET_CODE (op0) == CONST_FIXED)
      && (CONST_INT_P (op1)
          || GET_CODE (op1) == CONST_DOUBLE
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may dependent upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return NULL_RTX;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_DOUBLE_INT
      && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
      && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
    {
      double_int o0, o1, res, tmp;

      o0 = rtx_to_double_int (op0);
      o1 = rtx_to_double_int (op1);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          o1 = double_int_neg (o1);

          /* Fall through....  */

        case PLUS:
          res = double_int_add (o0, o1);
          break;

        case MULT:
          res = double_int_mul (o0, o1);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &res.low, &res.high,
                                    &tmp.low, &tmp.high))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &tmp.low, &tmp.high,
                                    &res.low, &res.high))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &res.low, &res.high,
                                    &tmp.low, &tmp.high))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &tmp.low, &tmp.high,
                                    &res.low, &res.high))
            return 0;
          break;

        case AND:
          res = double_int_and (o0, o1);
          break;

        case IOR:
          res = double_int_ior (o0, o1);
          break;

        case XOR:
          res = double_int_xor (o0, o1);
          break;

        case SMIN:
          res = double_int_smin (o0, o1);
          break;

        case SMAX:
          res = double_int_smax (o0, o1);
          break;

        case UMIN:
          res = double_int_umin (o0, o1);
          break;

        case UMAX:
          res = double_int_umax (o0, o1);
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          {
            unsigned HOST_WIDE_INT cnt;

            if (SHIFT_COUNT_TRUNCATED)
              o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));

            if (!double_int_fits_in_uhwi_p (o1)
                || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
              return 0;

            cnt = double_int_to_uhwi (o1);

            if (code == LSHIFTRT || code == ASHIFTRT)
              res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
                                       code == ASHIFTRT);
            else if (code == ASHIFT)
              res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
                                       true);
            else if (code == ROTATE)
              res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
            else /* code == ROTATERT */
              res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
          }
          break;

        default:
          return 0;
        }

      return immed_double_int_const (res, mode);
    }
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
          arg0 &= GET_MODE_MASK (mode);
          arg1 &= GET_MODE_MASK (mode);

          arg0s = arg0;
          if (val_signbit_known_set_p (mode, arg0s))
            arg0s |= ~GET_MODE_MASK (mode);

          arg1s = arg1;
          if (val_signbit_known_set_p (mode, arg1s))
            arg1s |= ~GET_MODE_MASK (mode);
        }
      else
        {
          arg0s = arg0;
          arg1s = arg1;
        }

      /* Compute the value of the arithmetic.  */

      switch (code)
        {
        case PLUS:
          val = arg0s + arg1s;
          break;

        case MINUS:
          val = arg0s - arg1s;
          break;

        case MULT:
          val = arg0s * arg1s;
          break;

        case DIV:
          if (arg1s == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s / arg1s;
          break;

        case MOD:
          if (arg1s == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s % arg1s;
          break;

        case UDIV:
          if (arg1 == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 / arg1;
          break;

        case UMOD:
          if (arg1 == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 % arg1;
          break;

        case AND:
          val = arg0 & arg1;
          break;

        case IOR:
          val = arg0 | arg1;
          break;

        case XOR:
          val = arg0 ^ arg1;
          break;

        case LSHIFTRT:
        case ASHIFT:
        case ASHIFTRT:
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
          break;

        case ROTATERT:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
                 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
          break;

        case ROTATE:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
                 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
          break;

        case COMPARE:
          /* Do nothing here.  */
          return 0;

        case SMIN:
          val = arg0s <= arg1s ? arg0s : arg1s;
          break;

        case UMIN:
          val = ((unsigned HOST_WIDE_INT) arg0
                 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SMAX:
          val = arg0s > arg1s ? arg0s : arg1s;
          break;

        case UMAX:
          val = ((unsigned HOST_WIDE_INT) arg0
                 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
        case SS_MULT:
        case US_MULT:
        case SS_DIV:
        case US_DIV:
        case SS_ASHIFT:
        case US_ASHIFT:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
= 0; i
< n_ops
; i
++)
3905 rtx this_op
= ops
[i
].op
;
3906 int this_neg
= ops
[i
].neg
;
3907 enum rtx_code this_code
= GET_CODE (this_op
);
3916 ops
[n_ops
].op
= XEXP (this_op
, 1);
3917 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3920 ops
[i
].op
= XEXP (this_op
, 0);
3923 canonicalized
|= this_neg
;
3927 ops
[i
].op
= XEXP (this_op
, 0);
3928 ops
[i
].neg
= ! this_neg
;
3935 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3936 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3937 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3939 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3940 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3941 ops
[n_ops
].neg
= this_neg
;
3949 /* ~a -> (-a - 1) */
3952 ops
[n_ops
].op
= constm1_rtx
;
3953 ops
[n_ops
++].neg
= this_neg
;
3954 ops
[i
].op
= XEXP (this_op
, 0);
3955 ops
[i
].neg
= !this_neg
;
3965 ops
[i
].op
= neg_const_int (mode
, this_op
);
3979 if (n_constants
> 1)
3982 gcc_assert (n_ops
>= 2);
  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          j = i - 1;
          if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
            continue;

          canonicalized = 1;
          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
          ops[j + 1] = save;
        }

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
                    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (CONST_INT_P (tem) && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                    canonicalized = 1;
                  }
              }
          }
      /* If nothing changed, fail.  */
      if (!canonicalized)
        return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result. If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }
  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done in, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1))))
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
                                    copy_rtx (XEXP (op0, 0)));
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
                               cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
           : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1)
          || GET_CODE (op1) == CONST_DOUBLE)
      && (CONST_INT_P (XEXP (op0, 1))
          || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}

enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}

/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || CONST_INT_P (trueop0))
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= GET_MODE_MASK (mode);
          l1u &= GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l0s))
            l0s |= ~GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l1s))
            l1s |= ~GET_MODE_MASK (mode);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
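  /* The double-word comparison above mirrors what a wider-than-host compare
     would compute: the signed result is decided by the high words (compared
     as signed) and, on a tie, by the unsigned low words, while the unsigned
     result uses unsigned comparisons throughout.  */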
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
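  /* As an example of the bounds check above: if nonzero_bits proves that
     only the low 8 bits of trueop0 can be set, mmax is at most 255, so a
     comparison such as (gtu (and:SI x (const_int 255)) (const_int 256))
     folds to const0_rtx here.  */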
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      break;
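      /* For example, (fma (neg a) (neg b) c) is rewritten as (fma a b c).
         If no negation was sunk and no operands were swapped, any_change
         stays false and we fall through to the final "return 0", so the
         caller keeps its original FMA rtx.  */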
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
          else
            val >>= op2val;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
                     != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
            }

          return gen_int_mode (val, mode);
        }
      break;
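      /* For example, with little-endian bit numbering (!BITS_BIG_ENDIAN),
         (zero_extract:SI (const_int 43) (const_int 4) (const_int 2))
         shifts right by 2 and masks to 4 bits, yielding (const_int 10).  */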
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
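      /* With the common STORE_FLAG_VALUE of 1, the constant case above turns
         (if_then_else (lt a b) (const_int 1) (const_int 0)) directly into an
         (lt a b) comparison rtx.  */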
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}

/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }
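  /* At this point VALUE holds the operand as a little-endian sequence of
     value_bit-sized (8-bit) chunks; e.g. a CONST_INT of 0x1234 unpacks as
     value[0] == 0x34, value[1] == 0x12, with the sign (0 or -1) filling the
     remaining chunks.  */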
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                    << (i - HOST_BITS_PER_WIDE_INT);

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  The irritating exception is a paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that cannot
             grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
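  /* For example, with 4-byte SFmode parts, (subreg:SF (concat:SC re im) 4)
     selects the imaginary half and simplifies to IM.  */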
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
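  /* The cases above make the lowpart SUBREG of (zero_extend:SI (reg:QI x))
     taken in QImode collapse to (reg:QI x), while a SUBREG that only selects
     bits beyond the ZERO_EXTEND's source folds to (const_int 0).  */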
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
      && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }
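  /* For example, on a 32-bit little-endian target this rewrites
     (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) as
     (subreg:SI (reg:DI x) 4), i.e. a direct reference to the high word.  */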
  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass-dependent state to be provided to these
           routines and add simplifications based on the pass-dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))